aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/trace/trace.c
diff options
context:
space:
mode:
authorSteven Rostedt (Red Hat) <srostedt@redhat.com>2013-03-05 18:25:02 -0500
committerSteven Rostedt <rostedt@goodmis.org>2013-03-15 00:35:48 -0400
commit45ad21ca5530efdca6a19e4a5ac5e7bd6e24f996 (patch)
tree7bc01dd23c577dae038281be2f644ef3e3e1354e /kernel/trace/trace.c
parent6de58e6269cd0568ca5fbae14423914eff0f7811 (diff)
tracing: Have trace_array keep track if snapshot buffer is allocated
The snapshot buffer belongs to the trace array not the tracer that is running. The trace array should be the data structure that keeps track of whether or not the snapshot buffer is allocated, not the tracer descriptor. Having the trace array keep track of it makes modifications so much easier. Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--kernel/trace/trace.c32
1 file changed, 15 insertions, 17 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 9bb0b52cbd32..bcc9460c2d65 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -667,7 +667,7 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
667 667
668 WARN_ON_ONCE(!irqs_disabled()); 668 WARN_ON_ONCE(!irqs_disabled());
669 669
670 if (!tr->current_trace->allocated_snapshot) { 670 if (!tr->allocated_snapshot) {
671 /* Only the nop tracer should hit this when disabling */ 671 /* Only the nop tracer should hit this when disabling */
672 WARN_ON_ONCE(tr->current_trace != &nop_trace); 672 WARN_ON_ONCE(tr->current_trace != &nop_trace);
673 return; 673 return;
@@ -700,7 +700,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
700 return; 700 return;
701 701
702 WARN_ON_ONCE(!irqs_disabled()); 702 WARN_ON_ONCE(!irqs_disabled());
703 if (WARN_ON_ONCE(!tr->current_trace->allocated_snapshot)) 703 if (WARN_ON_ONCE(!tr->allocated_snapshot))
704 return; 704 return;
705 705
706 arch_spin_lock(&ftrace_max_lock); 706 arch_spin_lock(&ftrace_max_lock);
@@ -802,7 +802,7 @@ int register_tracer(struct tracer *type)
802 if (ring_buffer_expanded) 802 if (ring_buffer_expanded)
803 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size, 803 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
804 RING_BUFFER_ALL_CPUS); 804 RING_BUFFER_ALL_CPUS);
805 type->allocated_snapshot = true; 805 tr->allocated_snapshot = true;
806 } 806 }
807#endif 807#endif
808 808
@@ -822,7 +822,7 @@ int register_tracer(struct tracer *type)
822 822
823#ifdef CONFIG_TRACER_MAX_TRACE 823#ifdef CONFIG_TRACER_MAX_TRACE
824 if (type->use_max_tr) { 824 if (type->use_max_tr) {
825 type->allocated_snapshot = false; 825 tr->allocated_snapshot = false;
826 826
827 /* Shrink the max buffer again */ 827 /* Shrink the max buffer again */
828 if (ring_buffer_expanded) 828 if (ring_buffer_expanded)
@@ -2463,7 +2463,7 @@ static void show_snapshot_percpu_help(struct seq_file *m)
2463 2463
2464static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) 2464static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2465{ 2465{
2466 if (iter->trace->allocated_snapshot) 2466 if (iter->tr->allocated_snapshot)
2467 seq_printf(m, "#\n# * Snapshot is allocated *\n#\n"); 2467 seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
2468 else 2468 else
2469 seq_printf(m, "#\n# * Snapshot is freed *\n#\n"); 2469 seq_printf(m, "#\n# * Snapshot is freed *\n#\n");
@@ -3364,12 +3364,12 @@ static int tracing_set_tracer(const char *buf)
3364 if (tr->current_trace->reset) 3364 if (tr->current_trace->reset)
3365 tr->current_trace->reset(tr); 3365 tr->current_trace->reset(tr);
3366 3366
3367#ifdef CONFIG_TRACER_MAX_TRACE
3368 had_max_tr = tr->current_trace->allocated_snapshot;
3369
3370 /* Current trace needs to be nop_trace before synchronize_sched */ 3367 /* Current trace needs to be nop_trace before synchronize_sched */
3371 tr->current_trace = &nop_trace; 3368 tr->current_trace = &nop_trace;
3372 3369
3370#ifdef CONFIG_TRACER_MAX_TRACE
3371 had_max_tr = tr->allocated_snapshot;
3372
3373 if (had_max_tr && !t->use_max_tr) { 3373 if (had_max_tr && !t->use_max_tr) {
3374 /* 3374 /*
3375 * We need to make sure that the update_max_tr sees that 3375 * We need to make sure that the update_max_tr sees that
@@ -3387,10 +3387,8 @@ static int tracing_set_tracer(const char *buf)
3387 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS); 3387 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
3388 set_buffer_entries(&tr->max_buffer, 1); 3388 set_buffer_entries(&tr->max_buffer, 1);
3389 tracing_reset_online_cpus(&tr->max_buffer); 3389 tracing_reset_online_cpus(&tr->max_buffer);
3390 tr->current_trace->allocated_snapshot = false; 3390 tr->allocated_snapshot = false;
3391 } 3391 }
3392#else
3393 tr->current_trace = &nop_trace;
3394#endif 3392#endif
3395 destroy_trace_option_files(topts); 3393 destroy_trace_option_files(topts);
3396 3394
@@ -3403,7 +3401,7 @@ static int tracing_set_tracer(const char *buf)
3403 RING_BUFFER_ALL_CPUS); 3401 RING_BUFFER_ALL_CPUS);
3404 if (ret < 0) 3402 if (ret < 0)
3405 goto out; 3403 goto out;
3406 t->allocated_snapshot = true; 3404 tr->allocated_snapshot = true;
3407 } 3405 }
3408#endif 3406#endif
3409 3407
@@ -4275,13 +4273,13 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
4275 ret = -EINVAL; 4273 ret = -EINVAL;
4276 break; 4274 break;
4277 } 4275 }
4278 if (tr->current_trace->allocated_snapshot) { 4276 if (tr->allocated_snapshot) {
4279 /* free spare buffer */ 4277 /* free spare buffer */
4280 ring_buffer_resize(tr->max_buffer.buffer, 1, 4278 ring_buffer_resize(tr->max_buffer.buffer, 1,
4281 RING_BUFFER_ALL_CPUS); 4279 RING_BUFFER_ALL_CPUS);
4282 set_buffer_entries(&tr->max_buffer, 1); 4280 set_buffer_entries(&tr->max_buffer, 1);
4283 tracing_reset_online_cpus(&tr->max_buffer); 4281 tracing_reset_online_cpus(&tr->max_buffer);
4284 tr->current_trace->allocated_snapshot = false; 4282 tr->allocated_snapshot = false;
4285 } 4283 }
4286 break; 4284 break;
4287 case 1: 4285 case 1:
@@ -4292,13 +4290,13 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
4292 break; 4290 break;
4293 } 4291 }
4294#endif 4292#endif
4295 if (!tr->current_trace->allocated_snapshot) { 4293 if (!tr->allocated_snapshot) {
4296 /* allocate spare buffer */ 4294 /* allocate spare buffer */
4297 ret = resize_buffer_duplicate_size(&tr->max_buffer, 4295 ret = resize_buffer_duplicate_size(&tr->max_buffer,
4298 &tr->trace_buffer, RING_BUFFER_ALL_CPUS); 4296 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
4299 if (ret < 0) 4297 if (ret < 0)
4300 break; 4298 break;
4301 tr->current_trace->allocated_snapshot = true; 4299 tr->allocated_snapshot = true;
4302 } 4300 }
4303 local_irq_disable(); 4301 local_irq_disable();
4304 /* Now, we're going to swap */ 4302 /* Now, we're going to swap */
@@ -4309,7 +4307,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
4309 local_irq_enable(); 4307 local_irq_enable();
4310 break; 4308 break;
4311 default: 4309 default:
4312 if (tr->current_trace->allocated_snapshot) { 4310 if (tr->allocated_snapshot) {
4313 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) 4311 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4314 tracing_reset_online_cpus(&tr->max_buffer); 4312 tracing_reset_online_cpus(&tr->max_buffer);
4315 else 4313 else