Diffstat (limited to 'kernel/trace/trace_sched_wakeup.c')
-rw-r--r--	kernel/trace/trace_sched_wakeup.c	93
1 file changed, 72 insertions(+), 21 deletions(-)
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 75aa97fbe1a1..fee77e15d815 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -36,7 +36,8 @@ static void __wakeup_reset(struct trace_array *tr);
 static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
 static void wakeup_graph_return(struct ftrace_graph_ret *trace);
 
-static int save_lat_flag;
+static int save_flags;
+static bool function_enabled;
 
 #define TRACE_DISPLAY_GRAPH	1
 
@@ -89,7 +90,7 @@ func_prolog_preempt_disable(struct trace_array *tr,
 	if (cpu != wakeup_current_cpu)
 		goto out_enable;
 
-	*data = tr->data[cpu];
+	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
 	disabled = atomic_inc_return(&(*data)->disabled);
 	if (unlikely(disabled != 1))
 		goto out;
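
Note: the `tr->data[cpu]` to `per_cpu_ptr(tr->trace_buffer.data, cpu)` conversion here (repeated in the probe functions further down) follows the consolidation of per-CPU tracer state into `struct trace_buffer`, so the main and snapshot (max) buffers can share one code path. A minimal sketch of the two access patterns, assuming the 3.10-era layout of kernel/trace/trace.h:

	struct trace_array_cpu *data;

	/* old layout: a per-CPU array embedded in struct trace_array */
	/* data = tr->data[cpu]; */

	/* new layout: trace_array_cpu is a percpu allocation hanging off
	 * tr->trace_buffer, fetched with the generic percpu accessor */
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
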
@@ -134,15 +135,60 @@ static struct ftrace_ops trace_ops __read_mostly =
 };
 #endif /* CONFIG_FUNCTION_TRACER */
 
-static int start_func_tracer(int graph)
+static int register_wakeup_function(int graph, int set)
 {
 	int ret;
 
-	if (!graph)
-		ret = register_ftrace_function(&trace_ops);
-	else
+	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
+	if (function_enabled || (!set && !(trace_flags & TRACE_ITER_FUNCTION)))
+		return 0;
+
+	if (graph)
 		ret = register_ftrace_graph(&wakeup_graph_return,
 					    &wakeup_graph_entry);
+	else
+		ret = register_ftrace_function(&trace_ops);
+
+	if (!ret)
+		function_enabled = true;
+
+	return ret;
+}
+
+static void unregister_wakeup_function(int graph)
+{
+	if (!function_enabled)
+		return;
+
+	if (graph)
+		unregister_ftrace_graph();
+	else
+		unregister_ftrace_function(&trace_ops);
+
+	function_enabled = false;
+}
+
+static void wakeup_function_set(int set)
+{
+	if (set)
+		register_wakeup_function(is_graph(), 1);
+	else
+		unregister_wakeup_function(is_graph());
+}
+
+static int wakeup_flag_changed(struct tracer *tracer, u32 mask, int set)
+{
+	if (mask & TRACE_ITER_FUNCTION)
+		wakeup_function_set(set);
+
+	return trace_keep_overwrite(tracer, mask, set);
+}
+
+static int start_func_tracer(int graph)
+{
+	int ret;
+
+	ret = register_wakeup_function(graph, 0);
 
 	if (!ret && tracing_is_enabled())
 		tracer_enabled = 1;
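
Note: the refactor above splits registration out of start_func_tracer() so it can also be driven by the new flag_changed callback: register_wakeup_function() refuses to double-register (function_enabled) and, unless called from the flag toggle itself (set), does nothing when the user has switched the function option off. A rough sketch of the intended call path when that option is toggled at runtime; the dispatch outside this file is paraphrased from the same-era kernel/trace/trace.c, not shown in this diff:

	/*
	 * echo 0 > /sys/kernel/debug/tracing/options/function
	 *   -> set_tracer_flag(tr, TRACE_ITER_FUNCTION, 0)
	 *      -> tracer->flag_changed()      == wakeup_flag_changed()
	 *         -> wakeup_function_set(0)
	 *            -> unregister_wakeup_function(is_graph())
	 *         -> trace_keep_overwrite()    keeps the snapshot buffer's
	 *                                      overwrite mode in sync
	 */
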
@@ -156,10 +202,7 @@ static void stop_func_tracer(int graph)
 {
 	tracer_enabled = 0;
 
-	if (!graph)
-		unregister_ftrace_function(&trace_ops);
-	else
-		unregister_ftrace_graph();
+	unregister_wakeup_function(graph);
 }
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
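
Note: with the stop path funneled through unregister_wakeup_function(), stopping the tracer after the function option was already cleared no longer unregisters callbacks that were never installed; the function_enabled guard turns redundant calls into no-ops. A sketch of the invariant, using the helpers introduced above:

	register_wakeup_function(graph, 0);	/* may or may not register */
	unregister_wakeup_function(graph);	/* undoes it, if it happened */
	unregister_wakeup_function(graph);	/* guaranteed no-op */
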
@@ -353,7 +396,7 @@ probe_wakeup_sched_switch(void *ignore,
 
 	/* disable local data, not wakeup_cpu data */
 	cpu = raw_smp_processor_id();
-	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
+	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
 	if (likely(disabled != 1))
 		goto out;
 
@@ -365,7 +408,7 @@ probe_wakeup_sched_switch(void *ignore,
 		goto out_unlock;
 
 	/* The task we are waiting for is waking up */
-	data = wakeup_trace->data[wakeup_cpu];
+	data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
 
 	__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
 	tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
@@ -387,7 +430,7 @@ out_unlock:
 	arch_spin_unlock(&wakeup_lock);
 	local_irq_restore(flags);
 out:
-	atomic_dec(&wakeup_trace->data[cpu]->disabled);
+	atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
 }
 
 static void __wakeup_reset(struct trace_array *tr)
@@ -405,7 +448,7 @@ static void wakeup_reset(struct trace_array *tr)
 {
 	unsigned long flags;
 
-	tracing_reset_online_cpus(tr);
+	tracing_reset_online_cpus(&tr->trace_buffer);
 
 	local_irq_save(flags);
 	arch_spin_lock(&wakeup_lock);
@@ -435,7 +478,7 @@ probe_wakeup(void *ignore, struct task_struct *p, int success)
 		return;
 
 	pc = preempt_count();
-	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
+	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
 	if (unlikely(disabled != 1))
 		goto out;
 
@@ -458,7 +501,7 @@ probe_wakeup(void *ignore, struct task_struct *p, int success)
 
 	local_save_flags(flags);
 
-	data = wakeup_trace->data[wakeup_cpu];
+	data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
 	data->preempt_timestamp = ftrace_now(cpu);
 	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);
 
@@ -472,7 +515,7 @@ probe_wakeup(void *ignore, struct task_struct *p, int success)
 out_locked:
 	arch_spin_unlock(&wakeup_lock);
 out:
-	atomic_dec(&wakeup_trace->data[cpu]->disabled);
+	atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
 }
 
 static void start_wakeup_tracer(struct trace_array *tr)
@@ -540,8 +583,11 @@ static void stop_wakeup_tracer(struct trace_array *tr)
 
 static int __wakeup_tracer_init(struct trace_array *tr)
 {
-	save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
-	trace_flags |= TRACE_ITER_LATENCY_FMT;
+	save_flags = trace_flags;
+
+	/* non overwrite screws up the latency tracers */
+	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
+	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
 
 	tracing_max_latency = 0;
 	wakeup_trace = tr;
@@ -563,12 +609,15 @@ static int wakeup_rt_tracer_init(struct trace_array *tr)
 
 static void wakeup_tracer_reset(struct trace_array *tr)
 {
+	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
+	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
+
 	stop_wakeup_tracer(tr);
 	/* make sure we put back any tasks we are tracing */
 	wakeup_reset(tr);
 
-	if (!save_lat_flag)
-		trace_flags &= ~TRACE_ITER_LATENCY_FMT;
+	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
+	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
 }
 
 static void wakeup_tracer_start(struct trace_array *tr)
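
Note: the init/reset pair now preserves two option bits instead of one. Overwrite mode is forced on for the duration (per the comment, non-overwrite screws up the latency tracers and their max-latency snapshots), and reset hands back whatever the user had. A condensed sketch of the contract, assuming set_tracer_flag() treats any nonzero third argument as "enabled":

	/* init: remember the user's flags, then force what latency
	 * tracing needs */
	save_flags = trace_flags;
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

	/* reset: (save_flags & BIT) is nonzero iff the user had BIT set,
	 * which is all set_tracer_flag() needs to restore it */
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT,
			save_flags & TRACE_ITER_LATENCY_FMT);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE,
			save_flags & TRACE_ITER_OVERWRITE);
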
@@ -594,6 +643,7 @@ static struct tracer wakeup_tracer __read_mostly =
 	.print_line	= wakeup_print_line,
 	.flags		= &tracer_flags,
 	.set_flag	= wakeup_set_flag,
+	.flag_changed	= wakeup_flag_changed,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest    = trace_selftest_startup_wakeup,
 #endif
@@ -615,6 +665,7 @@ static struct tracer wakeup_rt_tracer __read_mostly =
 	.print_line	= wakeup_print_line,
 	.flags		= &tracer_flags,
 	.set_flag	= wakeup_set_flag,
+	.flag_changed	= wakeup_flag_changed,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest    = trace_selftest_startup_wakeup,
 #endif