Diffstat (limited to 'kernel/trace/trace_sched_switch.c')
-rw-r--r--  kernel/trace/trace_sched_switch.c  |  10 ++--------
1 file changed, 2 insertions(+), 8 deletions(-)
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index bddf676914ed..5671db0e1827 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -36,11 +36,8 @@ ctx_switch_func(void *__rq, struct task_struct *prev, struct task_struct *next)
 	data = tr->data[cpu];
 	disabled = atomic_inc_return(&data->disabled);
 
-	if (likely(disabled == 1)) {
+	if (likely(disabled == 1))
 		tracing_sched_switch_trace(tr, data, prev, next, flags);
-		if (trace_flags & TRACE_ITER_SCHED_TREE)
-			ftrace_all_fair_tasks(__rq, tr, data);
-	}
 
 	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
@@ -65,11 +62,8 @@ wakeup_func(void *__rq, struct task_struct *wakee, struct task_struct *curr)
 	data = tr->data[cpu];
 	disabled = atomic_inc_return(&data->disabled);
 
-	if (likely(disabled == 1)) {
+	if (likely(disabled == 1))
 		tracing_sched_wakeup_trace(tr, data, wakee, curr, flags);
-		if (trace_flags & TRACE_ITER_SCHED_TREE)
-			ftrace_all_fair_tasks(__rq, tr, data);
-	}
 
 	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
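
For reference, a sketch of how ctx_switch_func() reads once the first hunk is applied: the TRACE_ITER_SCHED_TREE branch and its braces are gone, leaving only the single traced statement under the nesting check. Everything above the hunk context (the ctx_trace assignment, the tracer_enabled early-out, the irq-save and CPU lookup) is not in this diff; it is assumed from the usual ftrace probe pattern of this period and marked as such in the comments.

/* Sketch only -- lines outside the diff's hunk context are assumptions. */
static void
ctx_switch_func(void *__rq, struct task_struct *prev, struct task_struct *next)
{
	struct trace_array *tr = ctx_trace;	/* assumed per-tracer global */
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (!tracer_enabled)			/* assumed early-out guard */
		return;

	local_irq_save(flags);			/* assumed; restore is in the diff */
	cpu = raw_smp_processor_id();		/* assumed CPU lookup */
	data = tr->data[cpu];

	/* Per-CPU nesting counter: only the outermost caller records an event. */
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		tracing_sched_switch_trace(tr, data, prev, next, flags);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

The second hunk leaves wakeup_func() in the same shape, with tracing_sched_wakeup_trace() in place of the context-switch call.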