author     Steven Rostedt (Red Hat) <srostedt@redhat.com>    2013-03-05 21:13:47 -0500
committer  Steven Rostedt <rostedt@goodmis.org>              2013-03-15 00:35:49 -0400
commit     737223fbca3b1c91feb947c7f571b35749b743b6
tree       ff7a4e73785e909c5dd27584f6bfa5aab21a7153  /kernel/trace
parent     45ad21ca5530efdca6a19e4a5ac5e7bd6e24f996
tracing: Consolidate buffer allocation code
There's a bit of duplicate code in creating the trace buffers for the normal trace buffer and the max trace buffer among the instances and the main global_trace. This code can be consolidated and cleaned up a bit, making it cleaner and more readable with less duplication.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/trace.c  130
1 file changed, 63 insertions(+), 67 deletions(-)
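As a reading aid before the diff itself, here is a minimal standalone C sketch of the consolidation pattern the patch introduces: one helper allocates both the normal and the max buffer and unwinds whatever it managed to allocate on failure, so each caller shrinks to a single call plus one error check. The struct names, sizes, and libc allocators below are illustrative stand-ins only, not the kernel's trace_array / ring_buffer / percpu API.

/*
 * Sketch only: same shape as the patch's allocate_trace_buffers(),
 * using plain libc allocation instead of kernel primitives.
 */
#include <stdio.h>
#include <stdlib.h>

struct buffer {
	void *ring;	/* stand-in for trace_buffer.buffer */
	void *data;	/* stand-in for trace_buffer.data (per-CPU state) */
};

struct array {		/* stand-in for struct trace_array */
	struct buffer trace_buffer;
	struct buffer max_buffer;
};

/* Expects *tr zero-initialized; frees any partial allocation on failure. */
static int allocate_buffers(struct array *tr, size_t size)
{
	if (!(tr->trace_buffer.ring = malloc(size)))
		goto out_free;
	if (!(tr->trace_buffer.data = calloc(1, 64)))
		goto out_free;
	if (!(tr->max_buffer.ring = malloc(1)))
		goto out_free;
	if (!(tr->max_buffer.data = calloc(1, 64)))
		goto out_free;
	return 0;

 out_free:
	free(tr->trace_buffer.ring);
	free(tr->trace_buffer.data);
	free(tr->max_buffer.ring);
	free(tr->max_buffer.data);
	return -1;
}

int main(void)
{
	struct array global = {0}, instance = {0};

	/* Both the global path and the per-instance path share one helper. */
	if (allocate_buffers(&global, 4096) || allocate_buffers(&instance, 4096)) {
		fprintf(stderr, "buffer allocation failed\n");
		return 1;
	}
	puts("allocated both trace arrays via the shared helper");
	return 0;
}

The patch below does the same thing with ring_buffer_alloc(), alloc_percpu(), and a single out_free label in the new allocate_trace_buffers(), which both new_instance_create() and tracer_alloc_buffers() then call.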
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index bcc9460c2d65..57895d476509 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3171,6 +3171,7 @@ int tracer_init(struct tracer *t, struct trace_array *tr)
 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
 {
 	int cpu;
+
 	for_each_tracing_cpu(cpu)
 		per_cpu_ptr(buf->data, cpu)->entries = val;
 }
@@ -5267,12 +5268,70 @@ struct dentry *trace_instance_dir;
 static void
 init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);
 
-static int new_instance_create(const char *name)
+static void init_trace_buffers(struct trace_array *tr, struct trace_buffer *buf)
+{
+	int cpu;
+
+	for_each_tracing_cpu(cpu) {
+		memset(per_cpu_ptr(buf->data, cpu), 0, sizeof(struct trace_array_cpu));
+		per_cpu_ptr(buf->data, cpu)->trace_cpu.cpu = cpu;
+		per_cpu_ptr(buf->data, cpu)->trace_cpu.tr = tr;
+	}
+}
+
+static int allocate_trace_buffers(struct trace_array *tr, int size)
 {
 	enum ring_buffer_flags rb_flags;
+
+	rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
+
+	tr->trace_buffer.buffer = ring_buffer_alloc(size, rb_flags);
+	if (!tr->trace_buffer.buffer)
+		goto out_free;
+
+	tr->trace_buffer.data = alloc_percpu(struct trace_array_cpu);
+	if (!tr->trace_buffer.data)
+		goto out_free;
+
+	init_trace_buffers(tr, &tr->trace_buffer);
+
+	/* Allocate the first page for all buffers */
+	set_buffer_entries(&tr->trace_buffer,
+			   ring_buffer_size(tr->trace_buffer.buffer, 0));
+
+#ifdef CONFIG_TRACER_MAX_TRACE
+
+	tr->max_buffer.buffer = ring_buffer_alloc(1, rb_flags);
+	if (!tr->max_buffer.buffer)
+		goto out_free;
+
+	tr->max_buffer.data = alloc_percpu(struct trace_array_cpu);
+	if (!tr->max_buffer.data)
+		goto out_free;
+
+	init_trace_buffers(tr, &tr->max_buffer);
+
+	set_buffer_entries(&tr->max_buffer, 1);
+#endif
+	return 0;
+
+ out_free:
+	if (tr->trace_buffer.buffer)
+		ring_buffer_free(tr->trace_buffer.buffer);
+	free_percpu(tr->trace_buffer.data);
+
+#ifdef CONFIG_TRACER_MAX_TRACE
+	if (tr->max_buffer.buffer)
+		ring_buffer_free(tr->max_buffer.buffer);
+	free_percpu(tr->max_buffer.data);
+#endif
+	return -ENOMEM;
+}
+
+static int new_instance_create(const char *name)
+{
 	struct trace_array *tr;
 	int ret;
-	int i;
 
 	mutex_lock(&trace_types_lock);
 
@@ -5298,22 +5357,9 @@ static int new_instance_create(const char *name)
 	INIT_LIST_HEAD(&tr->systems);
 	INIT_LIST_HEAD(&tr->events);
 
-	rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
-
-	tr->trace_buffer.buffer = ring_buffer_alloc(trace_buf_size, rb_flags);
-	if (!tr->trace_buffer.buffer)
-		goto out_free_tr;
-
-	tr->trace_buffer.data = alloc_percpu(struct trace_array_cpu);
-	if (!tr->trace_buffer.data)
+	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
 		goto out_free_tr;
 
-	for_each_tracing_cpu(i) {
-		memset(per_cpu_ptr(tr->trace_buffer.data, i), 0, sizeof(struct trace_array_cpu));
-		per_cpu_ptr(tr->trace_buffer.data, i)->trace_cpu.cpu = i;
-		per_cpu_ptr(tr->trace_buffer.data, i)->trace_cpu.tr = tr;
-	}
-
 	/* Holder for file callbacks */
 	tr->trace_cpu.cpu = RING_BUFFER_ALL_CPUS;
 	tr->trace_cpu.tr = tr;
@@ -5736,8 +5782,6 @@ EXPORT_SYMBOL_GPL(ftrace_dump);
 __init static int tracer_alloc_buffers(void)
 {
 	int ring_buf_size;
-	enum ring_buffer_flags rb_flags;
-	int i;
 	int ret = -ENOMEM;
 
 
@@ -5758,69 +5802,21 @@ __init static int tracer_alloc_buffers(void)
 	else
 		ring_buf_size = 1;
 
-	rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
-
 	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
 	cpumask_copy(tracing_cpumask, cpu_all_mask);
 
 	raw_spin_lock_init(&global_trace.start_lock);
 
 	/* TODO: make the number of buffers hot pluggable with CPUS */
-	global_trace.trace_buffer.buffer = ring_buffer_alloc(ring_buf_size, rb_flags);
-	if (!global_trace.trace_buffer.buffer) {
+	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
 		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
 		WARN_ON(1);
 		goto out_free_cpumask;
 	}
 
-	global_trace.trace_buffer.data = alloc_percpu(struct trace_array_cpu);
-
-	if (!global_trace.trace_buffer.data) {
-		printk(KERN_ERR "tracer: failed to allocate percpu memory!\n");
-		WARN_ON(1);
-		goto out_free_cpumask;
-	}
-
-	for_each_tracing_cpu(i) {
-		memset(per_cpu_ptr(global_trace.trace_buffer.data, i), 0,
-		       sizeof(struct trace_array_cpu));
-		per_cpu_ptr(global_trace.trace_buffer.data, i)->trace_cpu.cpu = i;
-		per_cpu_ptr(global_trace.trace_buffer.data, i)->trace_cpu.tr = &global_trace;
-	}
-
 	if (global_trace.buffer_disabled)
 		tracing_off();
 
-#ifdef CONFIG_TRACER_MAX_TRACE
-	global_trace.max_buffer.data = alloc_percpu(struct trace_array_cpu);
-	if (!global_trace.max_buffer.data) {
-		printk(KERN_ERR "tracer: failed to allocate percpu memory!\n");
-		WARN_ON(1);
-		goto out_free_cpumask;
-	}
-	global_trace.max_buffer.buffer = ring_buffer_alloc(1, rb_flags);
-	if (!global_trace.max_buffer.buffer) {
-		printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
-		WARN_ON(1);
-		ring_buffer_free(global_trace.trace_buffer.buffer);
-		goto out_free_cpumask;
-	}
-
-	for_each_tracing_cpu(i) {
-		memset(per_cpu_ptr(global_trace.max_buffer.data, i), 0,
-		       sizeof(struct trace_array_cpu));
-		per_cpu_ptr(global_trace.max_buffer.data, i)->trace_cpu.cpu = i;
-		per_cpu_ptr(global_trace.max_buffer.data, i)->trace_cpu.tr = &global_trace;
-	}
-#endif
-
-	/* Allocate the first page for all buffers */
-	set_buffer_entries(&global_trace.trace_buffer,
-			   ring_buffer_size(global_trace.trace_buffer.buffer, 0));
-#ifdef CONFIG_TRACER_MAX_TRACE
-	set_buffer_entries(&global_trace.max_buffer, 1);
-#endif
-
 	trace_init_cmdlines();
 
 	register_tracer(&nop_trace);