aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/trace/trace.c
diff options
context:
space:
mode:
authorSteven Rostedt (Red Hat) <rostedt@goodmis.org>2013-02-01 18:38:47 -0500
committerSteven Rostedt <rostedt@goodmis.org>2013-02-01 18:38:47 -0500
commitd840f718d28715a9833c1a8f46c2493ff3fd219b (patch)
tree6fb0115db3dabd5ce79563fb1412c98edc24088e /kernel/trace/trace.c
parentc1043fcda1b9e8e5144cfdaee7be262c50dbdead (diff)
tracing: Init current_trace to nop_trace and remove NULL checks
On early boot up, when the ftrace ring buffer is initialized, the static variable current_trace is initialized to &nop_trace. Before this initialization, current_trace is NULL and will never become NULL again. It is always reassigned to an ftrace tracer. Several places check if current_trace is NULL before it is used, and this check is frivolous, because at the point in time when the checks are made the only way current_trace could be NULL is if ftrace failed its allocations at boot up, and the paths to these locations would probably not be possible. By initializing current_trace to &nop_trace where it is declared, current_trace will never be NULL, and we can remove all these checks of current_trace being NULL which never needed to be checked in the first place. Cc: Dan Carpenter <dan.carpenter@oracle.com> Cc: Hiraku Toyooka <hiraku.toyooka.gu@hitachi.com> Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--kernel/trace/trace.c30
1 files changed, 12 insertions, 18 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 70dce64b9ecf..5d520b7bb4c5 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -249,7 +249,7 @@ static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
 static struct tracer *trace_types __read_mostly;
 
 /* current_trace points to the tracer that is currently active */
-static struct tracer *current_trace __read_mostly;
+static struct tracer *current_trace __read_mostly = &nop_trace;
 
 /*
  * trace_types_lock is used to protect the trace_types list.
@@ -2100,8 +2100,7 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
 	unsigned long total;
 	const char *name = "preemption";
 
-	if (type)
-		name = type->name;
+	name = type->name;
 
 	get_total_entries(tr, &total, &entries);
 
@@ -2477,13 +2476,12 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
 	if (!iter->trace)
 		goto fail;
 
-	if (current_trace)
-		*iter->trace = *current_trace;
+	*iter->trace = *current_trace;
 
 	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
 		goto fail;
 
-	if ((current_trace && current_trace->print_max) || snapshot)
+	if (current_trace->print_max || snapshot)
 		iter->tr = &max_tr;
 	else
 		iter->tr = &global_trace;
@@ -3037,10 +3035,7 @@ tracing_set_trace_read(struct file *filp, char __user *ubuf,
 	int r;
 
 	mutex_lock(&trace_types_lock);
-	if (current_trace)
-		r = sprintf(buf, "%s\n", current_trace->name);
-	else
-		r = sprintf(buf, "\n");
+	r = sprintf(buf, "%s\n", current_trace->name);
 	mutex_unlock(&trace_types_lock);
 
 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
@@ -3231,10 +3226,10 @@ static int tracing_set_tracer(const char *buf)
 		goto out;
 
 	trace_branch_disable();
-	if (current_trace && current_trace->reset)
+	if (current_trace->reset)
 		current_trace->reset(tr);
 
-	had_max_tr = current_trace && current_trace->allocated_snapshot;
+	had_max_tr = current_trace->allocated_snapshot;
 	current_trace = &nop_trace;
 
 	if (had_max_tr && !t->use_max_tr) {
@@ -3373,8 +3368,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
 		ret = -ENOMEM;
 		goto fail;
 	}
-	if (current_trace)
-		*iter->trace = *current_trace;
+	*iter->trace = *current_trace;
 
 	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
 		ret = -ENOMEM;
@@ -3525,7 +3519,7 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
 
 	/* copy the tracer to avoid using a global lock all around */
 	mutex_lock(&trace_types_lock);
-	if (unlikely(current_trace && iter->trace->name != current_trace->name))
+	if (unlikely(iter->trace->name != current_trace->name))
 		*iter->trace = *current_trace;
 	mutex_unlock(&trace_types_lock);
 
@@ -3691,7 +3685,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 
 	/* copy the tracer to avoid using a global lock all around */
 	mutex_lock(&trace_types_lock);
-	if (unlikely(current_trace && iter->trace->name != current_trace->name))
+	if (unlikely(iter->trace->name != current_trace->name))
 		*iter->trace = *current_trace;
 	mutex_unlock(&trace_types_lock);
 
@@ -4115,7 +4109,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
 
 	mutex_lock(&trace_types_lock);
 
-	if (current_trace && current_trace->use_max_tr) {
+	if (current_trace->use_max_tr) {
 		ret = -EBUSY;
 		goto out;
 	}
@@ -5299,7 +5293,7 @@ __init static int tracer_alloc_buffers(void)
 	init_irq_work(&trace_work_wakeup, trace_wake_up);
 
 	register_tracer(&nop_trace);
-	current_trace = &nop_trace;
+
 	/* All seems OK, enable tracing */
 	tracing_disabled = 0;
 