author		Oleg Nesterov <oleg@redhat.com>	2014-07-23 15:35:03 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2014-11-11 12:42:44 -0500
commit		458faf0b88b19a46d51bb9760fa6e03a1bc6d97b
tree		bbb7975f14054631d54ed715f744af61c5f9dae6
parent		632537256e9f969a188cc4d0159e0027a459d3e7
tracing: Kill the dead code in probe_sched_switch() and probe_sched_wakeup()
After the previous patch it is clear that "tracer_enabled" can never be
true, so we can remove the "if (tracer_enabled)" code in
probe_sched_switch() and probe_sched_wakeup(). Plus we can obviously
remove tracer_enabled, ctx_trace, and sched_stopped as well.

Link: http://lkml.kernel.org/p/20140723193503.GA30217@redhat.com

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
 kernel/trace/trace_sched_switch.c | 40 ----------------------------------
 1 file changed, 0 insertions(+), 40 deletions(-)
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 3b60301c59d2..f7c7f4f1544c 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -14,12 +14,8 @@
 
 #include "trace.h"
 
-static struct trace_array	*ctx_trace;
-static int __read_mostly	tracer_enabled;
 static int			sched_ref;
 static DEFINE_MUTEX(sched_register_mutex);
-static int			sched_stopped;
-
 
 void
 tracing_sched_switch_trace(struct trace_array *tr,
@@ -52,29 +48,11 @@ tracing_sched_switch_trace(struct trace_array *tr,
 static void
 probe_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *next)
 {
-	struct trace_array_cpu *data;
-	unsigned long flags;
-	int cpu;
-	int pc;
-
 	if (unlikely(!sched_ref))
 		return;
 
 	tracing_record_cmdline(prev);
 	tracing_record_cmdline(next);
-
-	if (!tracer_enabled || sched_stopped)
-		return;
-
-	pc = preempt_count();
-	local_irq_save(flags);
-	cpu = raw_smp_processor_id();
-	data = per_cpu_ptr(ctx_trace->trace_buffer.data, cpu);
-
-	if (likely(!atomic_read(&data->disabled)))
-		tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc);
-
-	local_irq_restore(flags);
 }
 
 void
@@ -108,28 +86,10 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
 static void
 probe_sched_wakeup(void *ignore, struct task_struct *wakee, int success)
 {
-	struct trace_array_cpu *data;
-	unsigned long flags;
-	int cpu, pc;
-
 	if (unlikely(!sched_ref))
 		return;
 
 	tracing_record_cmdline(current);
-
-	if (!tracer_enabled || sched_stopped)
-		return;
-
-	pc = preempt_count();
-	local_irq_save(flags);
-	cpu = raw_smp_processor_id();
-	data = per_cpu_ptr(ctx_trace->trace_buffer.data, cpu);
-
-	if (likely(!atomic_read(&data->disabled)))
-		tracing_sched_wakeup_trace(ctx_trace, wakee, current,
-					   flags, pc);
-
-	local_irq_restore(flags);
 }
 
 static int tracing_sched_register(void)
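
For reference, here is what the two probe callbacks reduce to after this patch. This is a reconstruction from the context lines of the hunks above, not part of the commit itself: with tracer_enabled, ctx_trace, and sched_stopped gone, each probe only checks sched_ref and records the cmdlines of the tasks involved.

static void
probe_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *next)
{
	/* Bail out unless a tracer has registered the probes. */
	if (unlikely(!sched_ref))
		return;

	tracing_record_cmdline(prev);
	tracing_record_cmdline(next);
}

static void
probe_sched_wakeup(void *ignore, struct task_struct *wakee, int success)
{
	if (unlikely(!sched_ref))
		return;

	tracing_record_cmdline(current);
}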