aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/trace/trace_sched_switch.c
diff options
context:
space:
mode:
authorFrederic Weisbecker <fweisbec@gmail.com>2008-10-31 08:20:08 -0400
committerIngo Molnar <mingo@elte.hu>2008-11-04 11:14:06 -0500
commitd7ad44b697c9d13e445ddc7d16f736fbac333249 (patch)
treea18ac8995bf7158835c69ca1c9ab9b674fc617fa /kernel/trace/trace_sched_switch.c
parente55f605c14679c30be41473e60b7ad26524cdc35 (diff)
tracing/fastboot: use sched switch tracer from boot tracer
Impact: enhance boot trace output with scheduling events

Use the sched_switch tracer from the boot tracer. We also can trace
schedule events inside the initcalls. Sched tracing is disabled after
the initcall has finished and then reenabled before the next one is
started.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/trace/trace_sched_switch.c')
-rw-r--r--kernel/trace/trace_sched_switch.c6
1 files changed, 3 insertions, 3 deletions
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 96620c714300..9d7bdac331dd 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -127,6 +127,7 @@ static void tracing_start_sched_switch(void)
 	long ref;
 
 	mutex_lock(&tracepoint_mutex);
+	tracer_enabled = 1;
 	ref = atomic_inc_return(&sched_ref);
 	if (ref == 1)
 		tracing_sched_register();
@@ -138,6 +139,7 @@ static void tracing_stop_sched_switch(void)
 	long ref;
 
 	mutex_lock(&tracepoint_mutex);
+	tracer_enabled = 0;
 	ref = atomic_dec_and_test(&sched_ref);
 	if (ref)
 		tracing_sched_unregister();
@@ -158,12 +160,10 @@ static void start_sched_trace(struct trace_array *tr)
 {
 	sched_switch_reset(tr);
 	tracing_start_cmdline_record();
-	tracer_enabled = 1;
 }
 
 static void stop_sched_trace(struct trace_array *tr)
 {
-	tracer_enabled = 0;
 	tracing_stop_cmdline_record();
 }
169 169
@@ -190,7 +190,7 @@ static void sched_switch_trace_ctrl_update(struct trace_array *tr)
 		stop_sched_trace(tr);
 }
 
-static struct tracer sched_switch_trace __read_mostly =
+struct tracer sched_switch_trace __read_mostly =
 {
 	.name		= "sched_switch",
 	.init		= sched_switch_trace_init,