diff options
Diffstat (limited to 'kernel/trace/trace_sched_switch.c')
| -rw-r--r-- | kernel/trace/trace_sched_switch.c | 24 |
1 file changed, 10 insertions(+), 14 deletions(-)
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c index df175cb4564f..de35f200abd3 100644 --- a/kernel/trace/trace_sched_switch.c +++ b/kernel/trace/trace_sched_switch.c | |||
| @@ -18,6 +18,7 @@ static struct trace_array *ctx_trace; | |||
| 18 | static int __read_mostly tracer_enabled; | 18 | static int __read_mostly tracer_enabled; |
| 19 | static int sched_ref; | 19 | static int sched_ref; |
| 20 | static DEFINE_MUTEX(sched_register_mutex); | 20 | static DEFINE_MUTEX(sched_register_mutex); |
| 21 | static int sched_stopped; | ||
| 21 | 22 | ||
| 22 | static void | 23 | static void |
| 23 | probe_sched_switch(struct rq *__rq, struct task_struct *prev, | 24 | probe_sched_switch(struct rq *__rq, struct task_struct *prev, |
| @@ -28,7 +29,7 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev, | |||
| 28 | int cpu; | 29 | int cpu; |
| 29 | int pc; | 30 | int pc; |
| 30 | 31 | ||
| 31 | if (!sched_ref) | 32 | if (!sched_ref || sched_stopped) |
| 32 | return; | 33 | return; |
| 33 | 34 | ||
| 34 | tracing_record_cmdline(prev); | 35 | tracing_record_cmdline(prev); |
| @@ -43,7 +44,7 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev, | |||
| 43 | data = ctx_trace->data[cpu]; | 44 | data = ctx_trace->data[cpu]; |
| 44 | 45 | ||
| 45 | if (likely(!atomic_read(&data->disabled))) | 46 | if (likely(!atomic_read(&data->disabled))) |
| 46 | tracing_sched_switch_trace(ctx_trace, data, prev, next, flags, pc); | 47 | tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc); |
| 47 | 48 | ||
| 48 | local_irq_restore(flags); | 49 | local_irq_restore(flags); |
| 49 | } | 50 | } |
| @@ -66,7 +67,7 @@ probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success) | |||
| 66 | data = ctx_trace->data[cpu]; | 67 | data = ctx_trace->data[cpu]; |
| 67 | 68 | ||
| 68 | if (likely(!atomic_read(&data->disabled))) | 69 | if (likely(!atomic_read(&data->disabled))) |
| 69 | tracing_sched_wakeup_trace(ctx_trace, data, wakee, current, | 70 | tracing_sched_wakeup_trace(ctx_trace, wakee, current, |
| 70 | flags, pc); | 71 | flags, pc); |
| 71 | 72 | ||
| 72 | local_irq_restore(flags); | 73 | local_irq_restore(flags); |
| @@ -93,7 +94,7 @@ static int tracing_sched_register(void) | |||
| 93 | ret = register_trace_sched_switch(probe_sched_switch); | 94 | ret = register_trace_sched_switch(probe_sched_switch); |
| 94 | if (ret) { | 95 | if (ret) { |
| 95 | pr_info("sched trace: Couldn't activate tracepoint" | 96 | pr_info("sched trace: Couldn't activate tracepoint" |
| 96 | " probe to kernel_sched_schedule\n"); | 97 | " probe to kernel_sched_switch\n"); |
| 97 | goto fail_deprobe_wake_new; | 98 | goto fail_deprobe_wake_new; |
| 98 | } | 99 | } |
| 99 | 100 | ||
| @@ -185,12 +186,6 @@ void tracing_sched_switch_assign_trace(struct trace_array *tr) | |||
| 185 | ctx_trace = tr; | 186 | ctx_trace = tr; |
| 186 | } | 187 | } |
| 187 | 188 | ||
| 188 | static void start_sched_trace(struct trace_array *tr) | ||
| 189 | { | ||
| 190 | tracing_reset_online_cpus(tr); | ||
| 191 | tracing_start_sched_switch_record(); | ||
| 192 | } | ||
| 193 | |||
| 194 | static void stop_sched_trace(struct trace_array *tr) | 189 | static void stop_sched_trace(struct trace_array *tr) |
| 195 | { | 190 | { |
| 196 | tracing_stop_sched_switch_record(); | 191 | tracing_stop_sched_switch_record(); |
| @@ -199,7 +194,8 @@ static void stop_sched_trace(struct trace_array *tr) | |||
| 199 | static int sched_switch_trace_init(struct trace_array *tr) | 194 | static int sched_switch_trace_init(struct trace_array *tr) |
| 200 | { | 195 | { |
| 201 | ctx_trace = tr; | 196 | ctx_trace = tr; |
| 202 | start_sched_trace(tr); | 197 | tracing_reset_online_cpus(tr); |
| 198 | tracing_start_sched_switch_record(); | ||
| 203 | return 0; | 199 | return 0; |
| 204 | } | 200 | } |
| 205 | 201 | ||
| @@ -211,13 +207,12 @@ static void sched_switch_trace_reset(struct trace_array *tr) | |||
| 211 | 207 | ||
| 212 | static void sched_switch_trace_start(struct trace_array *tr) | 208 | static void sched_switch_trace_start(struct trace_array *tr) |
| 213 | { | 209 | { |
| 214 | tracing_reset_online_cpus(tr); | 210 | sched_stopped = 0; |
| 215 | tracing_start_sched_switch(); | ||
| 216 | } | 211 | } |
| 217 | 212 | ||
| 218 | static void sched_switch_trace_stop(struct trace_array *tr) | 213 | static void sched_switch_trace_stop(struct trace_array *tr) |
| 219 | { | 214 | { |
| 220 | tracing_stop_sched_switch(); | 215 | sched_stopped = 1; |
| 221 | } | 216 | } |
| 222 | 217 | ||
| 223 | static struct tracer sched_switch_trace __read_mostly = | 218 | static struct tracer sched_switch_trace __read_mostly = |
| @@ -227,6 +222,7 @@ static struct tracer sched_switch_trace __read_mostly = | |||
| 227 | .reset = sched_switch_trace_reset, | 222 | .reset = sched_switch_trace_reset, |
| 228 | .start = sched_switch_trace_start, | 223 | .start = sched_switch_trace_start, |
| 229 | .stop = sched_switch_trace_stop, | 224 | .stop = sched_switch_trace_stop, |
| 225 | .wait_pipe = poll_wait_pipe, | ||
| 230 | #ifdef CONFIG_FTRACE_SELFTEST | 226 | #ifdef CONFIG_FTRACE_SELFTEST |
| 231 | .selftest = trace_selftest_startup_sched_switch, | 227 | .selftest = trace_selftest_startup_sched_switch, |
| 232 | #endif | 228 | #endif |
