diff options
Diffstat (limited to 'kernel/trace/trace_sched_switch.c')
| -rw-r--r-- | kernel/trace/trace_sched_switch.c | 106 |
1 file changed, 72 insertions, 34 deletions
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c index b8f56beb1a62..863390557b44 100644 --- a/kernel/trace/trace_sched_switch.c +++ b/kernel/trace/trace_sched_switch.c | |||
| @@ -16,7 +16,8 @@ | |||
| 16 | 16 | ||
| 17 | static struct trace_array *ctx_trace; | 17 | static struct trace_array *ctx_trace; |
| 18 | static int __read_mostly tracer_enabled; | 18 | static int __read_mostly tracer_enabled; |
| 19 | static atomic_t sched_ref; | 19 | static int sched_ref; |
| 20 | static DEFINE_MUTEX(sched_register_mutex); | ||
| 20 | 21 | ||
| 21 | static void | 22 | static void |
| 22 | probe_sched_switch(struct rq *__rq, struct task_struct *prev, | 23 | probe_sched_switch(struct rq *__rq, struct task_struct *prev, |
| @@ -27,7 +28,7 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev, | |||
| 27 | int cpu; | 28 | int cpu; |
| 28 | int pc; | 29 | int pc; |
| 29 | 30 | ||
| 30 | if (!atomic_read(&sched_ref)) | 31 | if (!sched_ref) |
| 31 | return; | 32 | return; |
| 32 | 33 | ||
| 33 | tracing_record_cmdline(prev); | 34 | tracing_record_cmdline(prev); |
| @@ -123,20 +124,18 @@ static void tracing_sched_unregister(void) | |||
| 123 | 124 | ||
| 124 | static void tracing_start_sched_switch(void) | 125 | static void tracing_start_sched_switch(void) |
| 125 | { | 126 | { |
| 126 | long ref; | 127 | mutex_lock(&sched_register_mutex); |
| 127 | 128 | if (!(sched_ref++)) | |
| 128 | ref = atomic_inc_return(&sched_ref); | ||
| 129 | if (ref == 1) | ||
| 130 | tracing_sched_register(); | 129 | tracing_sched_register(); |
| 130 | mutex_unlock(&sched_register_mutex); | ||
| 131 | } | 131 | } |
| 132 | 132 | ||
| 133 | static void tracing_stop_sched_switch(void) | 133 | static void tracing_stop_sched_switch(void) |
| 134 | { | 134 | { |
| 135 | long ref; | 135 | mutex_lock(&sched_register_mutex); |
| 136 | 136 | if (!(--sched_ref)) | |
| 137 | ref = atomic_dec_and_test(&sched_ref); | ||
| 138 | if (ref) | ||
| 139 | tracing_sched_unregister(); | 137 | tracing_sched_unregister(); |
| 138 | mutex_unlock(&sched_register_mutex); | ||
| 140 | } | 139 | } |
| 141 | 140 | ||
| 142 | void tracing_start_cmdline_record(void) | 141 | void tracing_start_cmdline_record(void) |
| @@ -149,40 +148,86 @@ void tracing_stop_cmdline_record(void) | |||
| 149 | tracing_stop_sched_switch(); | 148 | tracing_stop_sched_switch(); |
| 150 | } | 149 | } |
| 151 | 150 | ||
| 151 | /** | ||
| 152 | * tracing_start_sched_switch_record - start tracing context switches | ||
| 153 | * | ||
| 154 | * Turns on context switch tracing for a tracer. | ||
| 155 | */ | ||
| 156 | void tracing_start_sched_switch_record(void) | ||
| 157 | { | ||
| 158 | if (unlikely(!ctx_trace)) { | ||
| 159 | WARN_ON(1); | ||
| 160 | return; | ||
| 161 | } | ||
| 162 | |||
| 163 | tracing_start_sched_switch(); | ||
| 164 | |||
| 165 | mutex_lock(&sched_register_mutex); | ||
| 166 | tracer_enabled++; | ||
| 167 | mutex_unlock(&sched_register_mutex); | ||
| 168 | } | ||
| 169 | |||
| 170 | /** | ||
| 171 | * tracing_stop_sched_switch_record - stop tracing context switches | ||
| 172 | * | ||
| 173 | * Turns off context switch tracing for a tracer. | ||
| 174 | */ | ||
| 175 | void tracing_stop_sched_switch_record(void) | ||
| 176 | { | ||
| 177 | mutex_lock(&sched_register_mutex); | ||
| 178 | tracer_enabled--; | ||
| 179 | WARN_ON(tracer_enabled < 0); | ||
| 180 | mutex_unlock(&sched_register_mutex); | ||
| 181 | |||
| 182 | tracing_stop_sched_switch(); | ||
| 183 | } | ||
| 184 | |||
| 185 | /** | ||
| 186 | * tracing_sched_switch_assign_trace - assign a trace array for ctx switch | ||
| 187 | * @tr: trace array pointer to assign | ||
| 188 | * | ||
| 189 | * Some tracers might want to record the context switches in their | ||
| 190 | * trace. This function lets those tracers assign the trace array | ||
| 191 | * to use. | ||
| 192 | */ | ||
| 193 | void tracing_sched_switch_assign_trace(struct trace_array *tr) | ||
| 194 | { | ||
| 195 | ctx_trace = tr; | ||
| 196 | } | ||
| 197 | |||
| 152 | static void start_sched_trace(struct trace_array *tr) | 198 | static void start_sched_trace(struct trace_array *tr) |
| 153 | { | 199 | { |
| 154 | sched_switch_reset(tr); | 200 | sched_switch_reset(tr); |
| 155 | tracing_start_cmdline_record(); | 201 | tracing_start_sched_switch_record(); |
| 156 | tracer_enabled = 1; | ||
| 157 | } | 202 | } |
| 158 | 203 | ||
| 159 | static void stop_sched_trace(struct trace_array *tr) | 204 | static void stop_sched_trace(struct trace_array *tr) |
| 160 | { | 205 | { |
| 161 | tracer_enabled = 0; | 206 | tracing_stop_sched_switch_record(); |
| 162 | tracing_stop_cmdline_record(); | ||
| 163 | } | 207 | } |
| 164 | 208 | ||
| 165 | static void sched_switch_trace_init(struct trace_array *tr) | 209 | static int sched_switch_trace_init(struct trace_array *tr) |
| 166 | { | 210 | { |
| 167 | ctx_trace = tr; | 211 | ctx_trace = tr; |
| 168 | 212 | start_sched_trace(tr); | |
| 169 | if (tr->ctrl) | 213 | return 0; |
| 170 | start_sched_trace(tr); | ||
| 171 | } | 214 | } |
| 172 | 215 | ||
| 173 | static void sched_switch_trace_reset(struct trace_array *tr) | 216 | static void sched_switch_trace_reset(struct trace_array *tr) |
| 174 | { | 217 | { |
| 175 | if (tr->ctrl) | 218 | if (sched_ref) |
| 176 | stop_sched_trace(tr); | 219 | stop_sched_trace(tr); |
| 177 | } | 220 | } |
| 178 | 221 | ||
| 179 | static void sched_switch_trace_ctrl_update(struct trace_array *tr) | 222 | static void sched_switch_trace_start(struct trace_array *tr) |
| 180 | { | 223 | { |
| 181 | /* When starting a new trace, reset the buffers */ | 224 | sched_switch_reset(tr); |
| 182 | if (tr->ctrl) | 225 | tracing_start_sched_switch(); |
| 183 | start_sched_trace(tr); | 226 | } |
| 184 | else | 227 | |
| 185 | stop_sched_trace(tr); | 228 | static void sched_switch_trace_stop(struct trace_array *tr) |
| 229 | { | ||
| 230 | tracing_stop_sched_switch(); | ||
| 186 | } | 231 | } |
| 187 | 232 | ||
| 188 | static struct tracer sched_switch_trace __read_mostly = | 233 | static struct tracer sched_switch_trace __read_mostly = |
| @@ -190,7 +235,8 @@ static struct tracer sched_switch_trace __read_mostly = | |||
| 190 | .name = "sched_switch", | 235 | .name = "sched_switch", |
| 191 | .init = sched_switch_trace_init, | 236 | .init = sched_switch_trace_init, |
| 192 | .reset = sched_switch_trace_reset, | 237 | .reset = sched_switch_trace_reset, |
| 193 | .ctrl_update = sched_switch_trace_ctrl_update, | 238 | .start = sched_switch_trace_start, |
| 239 | .stop = sched_switch_trace_stop, | ||
| 194 | #ifdef CONFIG_FTRACE_SELFTEST | 240 | #ifdef CONFIG_FTRACE_SELFTEST |
| 195 | .selftest = trace_selftest_startup_sched_switch, | 241 | .selftest = trace_selftest_startup_sched_switch, |
| 196 | #endif | 242 | #endif |
| @@ -198,14 +244,6 @@ static struct tracer sched_switch_trace __read_mostly = | |||
| 198 | 244 | ||
| 199 | __init static int init_sched_switch_trace(void) | 245 | __init static int init_sched_switch_trace(void) |
| 200 | { | 246 | { |
| 201 | int ret = 0; | ||
| 202 | |||
| 203 | if (atomic_read(&sched_ref)) | ||
| 204 | ret = tracing_sched_register(); | ||
| 205 | if (ret) { | ||
| 206 | pr_info("error registering scheduler trace\n"); | ||
| 207 | return ret; | ||
| 208 | } | ||
| 209 | return register_tracer(&sched_switch_trace); | 247 | return register_tracer(&sched_switch_trace); |
| 210 | } | 248 | } |
| 211 | device_initcall(init_sched_switch_trace); | 249 | device_initcall(init_sched_switch_trace); |
