aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/trace/trace_sched_wakeup.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/trace/trace_sched_wakeup.c')
-rw-r--r--  kernel/trace/trace_sched_wakeup.c  102
1 files changed, 59 insertions, 43 deletions
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 42ae1e77b6b3..5bc00e8f153e 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -25,12 +25,15 @@ static int __read_mostly tracer_enabled;
25static struct task_struct *wakeup_task; 25static struct task_struct *wakeup_task;
26static int wakeup_cpu; 26static int wakeup_cpu;
27static unsigned wakeup_prio = -1; 27static unsigned wakeup_prio = -1;
28static int wakeup_rt;
28 29
29static raw_spinlock_t wakeup_lock = 30static raw_spinlock_t wakeup_lock =
30 (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; 31 (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
31 32
32static void __wakeup_reset(struct trace_array *tr); 33static void __wakeup_reset(struct trace_array *tr);
33 34
35static int save_lat_flag;
36
34#ifdef CONFIG_FUNCTION_TRACER 37#ifdef CONFIG_FUNCTION_TRACER
35/* 38/*
36 * irqsoff uses its own tracer function to keep the overhead down: 39 * irqsoff uses its own tracer function to keep the overhead down:
@@ -71,7 +74,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
71 if (task_cpu(wakeup_task) != cpu) 74 if (task_cpu(wakeup_task) != cpu)
72 goto unlock; 75 goto unlock;
73 76
74 trace_function(tr, data, ip, parent_ip, flags, pc); 77 trace_function(tr, ip, parent_ip, flags, pc);
75 78
76 unlock: 79 unlock:
77 __raw_spin_unlock(&wakeup_lock); 80 __raw_spin_unlock(&wakeup_lock);
@@ -151,7 +154,8 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
151 if (unlikely(!tracer_enabled || next != wakeup_task)) 154 if (unlikely(!tracer_enabled || next != wakeup_task))
152 goto out_unlock; 155 goto out_unlock;
153 156
154 trace_function(wakeup_trace, data, CALLER_ADDR1, CALLER_ADDR2, flags, pc); 157 trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
158 tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
155 159
156 /* 160 /*
157 * usecs conversion is slow so we try to delay the conversion 161 * usecs conversion is slow so we try to delay the conversion
@@ -182,13 +186,10 @@ out:
182 186
183static void __wakeup_reset(struct trace_array *tr) 187static void __wakeup_reset(struct trace_array *tr)
184{ 188{
185 struct trace_array_cpu *data;
186 int cpu; 189 int cpu;
187 190
188 for_each_possible_cpu(cpu) { 191 for_each_possible_cpu(cpu)
189 data = tr->data[cpu];
190 tracing_reset(tr, cpu); 192 tracing_reset(tr, cpu);
191 }
192 193
193 wakeup_cpu = -1; 194 wakeup_cpu = -1;
194 wakeup_prio = -1; 195 wakeup_prio = -1;
@@ -213,6 +214,7 @@ static void wakeup_reset(struct trace_array *tr)
213static void 214static void
214probe_wakeup(struct rq *rq, struct task_struct *p, int success) 215probe_wakeup(struct rq *rq, struct task_struct *p, int success)
215{ 216{
217 struct trace_array_cpu *data;
216 int cpu = smp_processor_id(); 218 int cpu = smp_processor_id();
217 unsigned long flags; 219 unsigned long flags;
218 long disabled; 220 long disabled;
@@ -224,7 +226,7 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success)
224 tracing_record_cmdline(p); 226 tracing_record_cmdline(p);
225 tracing_record_cmdline(current); 227 tracing_record_cmdline(current);
226 228
227 if (likely(!rt_task(p)) || 229 if ((wakeup_rt && !rt_task(p)) ||
228 p->prio >= wakeup_prio || 230 p->prio >= wakeup_prio ||
229 p->prio >= current->prio) 231 p->prio >= current->prio)
230 return; 232 return;
@@ -252,9 +254,16 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success)
252 254
253 local_save_flags(flags); 255 local_save_flags(flags);
254 256
255 wakeup_trace->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu); 257 data = wakeup_trace->data[wakeup_cpu];
256 trace_function(wakeup_trace, wakeup_trace->data[wakeup_cpu], 258 data->preempt_timestamp = ftrace_now(cpu);
257 CALLER_ADDR1, CALLER_ADDR2, flags, pc); 259 tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);
260
261 /*
262 * We must be careful in using CALLER_ADDR2. But since wake_up
263 * is not called by an assembly function (where as schedule is)
264 * it should be safe to use it here.
265 */
266 trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
258 267
259out_locked: 268out_locked:
260 __raw_spin_unlock(&wakeup_lock); 269 __raw_spin_unlock(&wakeup_lock);
@@ -262,12 +271,6 @@ out:
262 atomic_dec(&wakeup_trace->data[cpu]->disabled); 271 atomic_dec(&wakeup_trace->data[cpu]->disabled);
263} 272}
264 273
265/*
266 * save_tracer_enabled is used to save the state of the tracer_enabled
267 * variable when we disable it when we open a trace output file.
268 */
269static int save_tracer_enabled;
270
271static void start_wakeup_tracer(struct trace_array *tr) 274static void start_wakeup_tracer(struct trace_array *tr)
272{ 275{
273 int ret; 276 int ret;
@@ -289,7 +292,7 @@ static void start_wakeup_tracer(struct trace_array *tr)
289 ret = register_trace_sched_switch(probe_wakeup_sched_switch); 292 ret = register_trace_sched_switch(probe_wakeup_sched_switch);
290 if (ret) { 293 if (ret) {
291 pr_info("sched trace: Couldn't activate tracepoint" 294 pr_info("sched trace: Couldn't activate tracepoint"
292 " probe to kernel_sched_schedule\n"); 295 " probe to kernel_sched_switch\n");
293 goto fail_deprobe_wake_new; 296 goto fail_deprobe_wake_new;
294 } 297 }
295 298
@@ -306,13 +309,10 @@ static void start_wakeup_tracer(struct trace_array *tr)
306 309
307 register_ftrace_function(&trace_ops); 310 register_ftrace_function(&trace_ops);
308 311
309 if (tracing_is_enabled()) { 312 if (tracing_is_enabled())
310 tracer_enabled = 1; 313 tracer_enabled = 1;
311 save_tracer_enabled = 1; 314 else
312 } else {
313 tracer_enabled = 0; 315 tracer_enabled = 0;
314 save_tracer_enabled = 0;
315 }
316 316
317 return; 317 return;
318fail_deprobe_wake_new: 318fail_deprobe_wake_new:
@@ -324,54 +324,54 @@ fail_deprobe:
324static void stop_wakeup_tracer(struct trace_array *tr) 324static void stop_wakeup_tracer(struct trace_array *tr)
325{ 325{
326 tracer_enabled = 0; 326 tracer_enabled = 0;
327 save_tracer_enabled = 0;
328 unregister_ftrace_function(&trace_ops); 327 unregister_ftrace_function(&trace_ops);
329 unregister_trace_sched_switch(probe_wakeup_sched_switch); 328 unregister_trace_sched_switch(probe_wakeup_sched_switch);
330 unregister_trace_sched_wakeup_new(probe_wakeup); 329 unregister_trace_sched_wakeup_new(probe_wakeup);
331 unregister_trace_sched_wakeup(probe_wakeup); 330 unregister_trace_sched_wakeup(probe_wakeup);
332} 331}
333 332
334static int wakeup_tracer_init(struct trace_array *tr) 333static int __wakeup_tracer_init(struct trace_array *tr)
335{ 334{
335 save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
336 trace_flags |= TRACE_ITER_LATENCY_FMT;
337
336 tracing_max_latency = 0; 338 tracing_max_latency = 0;
337 wakeup_trace = tr; 339 wakeup_trace = tr;
338 start_wakeup_tracer(tr); 340 start_wakeup_tracer(tr);
339 return 0; 341 return 0;
340} 342}
341 343
344static int wakeup_tracer_init(struct trace_array *tr)
345{
346 wakeup_rt = 0;
347 return __wakeup_tracer_init(tr);
348}
349
350static int wakeup_rt_tracer_init(struct trace_array *tr)
351{
352 wakeup_rt = 1;
353 return __wakeup_tracer_init(tr);
354}
355
342static void wakeup_tracer_reset(struct trace_array *tr) 356static void wakeup_tracer_reset(struct trace_array *tr)
343{ 357{
344 stop_wakeup_tracer(tr); 358 stop_wakeup_tracer(tr);
345 /* make sure we put back any tasks we are tracing */ 359 /* make sure we put back any tasks we are tracing */
346 wakeup_reset(tr); 360 wakeup_reset(tr);
361
362 if (!save_lat_flag)
363 trace_flags &= ~TRACE_ITER_LATENCY_FMT;
347} 364}
348 365
349static void wakeup_tracer_start(struct trace_array *tr) 366static void wakeup_tracer_start(struct trace_array *tr)
350{ 367{
351 wakeup_reset(tr); 368 wakeup_reset(tr);
352 tracer_enabled = 1; 369 tracer_enabled = 1;
353 save_tracer_enabled = 1;
354} 370}
355 371
356static void wakeup_tracer_stop(struct trace_array *tr) 372static void wakeup_tracer_stop(struct trace_array *tr)
357{ 373{
358 tracer_enabled = 0; 374 tracer_enabled = 0;
359 save_tracer_enabled = 0;
360}
361
362static void wakeup_tracer_open(struct trace_iterator *iter)
363{
364 /* stop the trace while dumping */
365 tracer_enabled = 0;
366}
367
368static void wakeup_tracer_close(struct trace_iterator *iter)
369{
370 /* forget about any processes we were recording */
371 if (save_tracer_enabled) {
372 wakeup_reset(iter->tr);
373 tracer_enabled = 1;
374 }
375} 375}
376 376
377static struct tracer wakeup_tracer __read_mostly = 377static struct tracer wakeup_tracer __read_mostly =
@@ -381,8 +381,20 @@ static struct tracer wakeup_tracer __read_mostly =
381 .reset = wakeup_tracer_reset, 381 .reset = wakeup_tracer_reset,
382 .start = wakeup_tracer_start, 382 .start = wakeup_tracer_start,
383 .stop = wakeup_tracer_stop, 383 .stop = wakeup_tracer_stop,
384 .open = wakeup_tracer_open, 384 .print_max = 1,
385 .close = wakeup_tracer_close, 385#ifdef CONFIG_FTRACE_SELFTEST
386 .selftest = trace_selftest_startup_wakeup,
387#endif
388};
389
390static struct tracer wakeup_rt_tracer __read_mostly =
391{
392 .name = "wakeup_rt",
393 .init = wakeup_rt_tracer_init,
394 .reset = wakeup_tracer_reset,
395 .start = wakeup_tracer_start,
396 .stop = wakeup_tracer_stop,
397 .wait_pipe = poll_wait_pipe,
386 .print_max = 1, 398 .print_max = 1,
387#ifdef CONFIG_FTRACE_SELFTEST 399#ifdef CONFIG_FTRACE_SELFTEST
388 .selftest = trace_selftest_startup_wakeup, 400 .selftest = trace_selftest_startup_wakeup,
@@ -397,6 +409,10 @@ __init static int init_wakeup_tracer(void)
397 if (ret) 409 if (ret)
398 return ret; 410 return ret;
399 411
412 ret = register_tracer(&wakeup_rt_tracer);
413 if (ret)
414 return ret;
415
400 return 0; 416 return 0;
401} 417}
402device_initcall(init_wakeup_tracer); 418device_initcall(init_wakeup_tracer);