author		Peter Zijlstra <peterz@infradead.org>	2015-09-28 12:06:56 -0400
committer	Ingo Molnar <mingo@kernel.org>		2015-10-06 11:08:15 -0400
commit		c73464b1c8434ad4cbfd5369c3e724f3e8ffe5a4
tree		3823bb8bc9fe7f97a6e26283354e62e96a7680fd /include/trace
parent		fc13aebab7d8f0d19d557c721a0f25cdf7ae905c
sched/core: Fix trace_sched_switch()
__trace_sched_switch_state() is the last remaining PREEMPT_ACTIVE user; move trace_sched_switch() from prepare_task_switch() to __schedule() and propagate the @preempt argument.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'include/trace')
-rw-r--r--	include/trace/events/sched.h | 22 +++++++++-------------
1 file changed, 9 insertions(+), 13 deletions(-)
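
The scheduler-core half of this change is not in the diffstat above, which is limited to include/trace. As a hedged sketch of what the commit message describes (heavily simplified, not the verbatim kernel/sched/core.c hunk), the tracepoint call leaves prepare_task_switch() and fires from __schedule(), which passes its own preemption flag through:

/*
 * Sketch only: simplified from the commit message; the surrounding
 * scheduler code is elided and not reproduced from kernel/sched/core.c.
 */
static void __sched notrace __schedule(bool preempt)
{
	struct rq *rq;
	struct task_struct *prev, *next;

	/* ... lock the runqueue, pick @next, update switch counters ... */

	if (likely(prev != next)) {
		/*
		 * Previously fired from prepare_task_switch(); emitting it
		 * here lets the preemption status known to __schedule() be
		 * passed along instead of being reconstructed from
		 * PREEMPT_ACTIVE inside the tracepoint helper.
		 */
		trace_sched_switch(preempt, prev, next);

		rq = context_switch(rq, prev, next); /* unlocks the rq */
	}
}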
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 539d6bc3216a..9b90c57517a9 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -104,22 +104,17 @@ DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
 	     TP_ARGS(p));
 
 #ifdef CREATE_TRACE_POINTS
-static inline long __trace_sched_switch_state(struct task_struct *p)
+static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
 {
-	long state = p->state;
-
-#ifdef CONFIG_PREEMPT
 #ifdef CONFIG_SCHED_DEBUG
 	BUG_ON(p != current);
 #endif /* CONFIG_SCHED_DEBUG */
+
 	/*
-	 * For all intents and purposes a preempted task is a running task.
+	 * Preemption ignores task state, therefore preempted tasks are always
+	 * RUNNING (we will not have dequeued if state != RUNNING).
 	 */
-	if (preempt_count() & PREEMPT_ACTIVE)
-		state = TASK_RUNNING | TASK_STATE_MAX;
-#endif /* CONFIG_PREEMPT */
-
-	return state;
+	return preempt ? TASK_RUNNING | TASK_STATE_MAX : p->state;
 }
 #endif /* CREATE_TRACE_POINTS */
 
@@ -128,10 +123,11 @@ static inline long __trace_sched_switch_state(struct task_struct *p)
  */
 TRACE_EVENT(sched_switch,
 
-	TP_PROTO(struct task_struct *prev,
+	TP_PROTO(bool preempt,
+		 struct task_struct *prev,
 		 struct task_struct *next),
 
-	TP_ARGS(prev, next),
+	TP_ARGS(preempt, prev, next),
 
 	TP_STRUCT__entry(
 		__array(	char,	prev_comm,	TASK_COMM_LEN	)
@@ -147,7 +143,7 @@ TRACE_EVENT(sched_switch,
 		memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
 		__entry->prev_pid	= prev->pid;
 		__entry->prev_prio	= prev->prio;
-		__entry->prev_state	= __trace_sched_switch_state(prev);
+		__entry->prev_state	= __trace_sched_switch_state(preempt, prev);
 		memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
 		__entry->next_pid	= next->pid;
 		__entry->next_prio	= next->prio;
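
What the new return value means to a consumer: with @preempt true, __trace_sched_switch_state() reports TASK_RUNNING with the TASK_STATE_MAX marker bit set, which the sched_switch output format (in the TP_printk, not part of the hunks above) renders as "R+". Below is a minimal, self-contained illustration of that decoding; the state constants are hard-coded here as assumptions matching v4.3-era kernels and the single-letter mapping is simplified.

#include <stdio.h>

/* Assumed values, mirroring include/linux/sched.h around v4.3. */
#define TASK_RUNNING		0x0000
#define TASK_INTERRUPTIBLE	0x0001
#define TASK_UNINTERRUPTIBLE	0x0002
#define TASK_STATE_MAX		0x0800	/* marker bit for "preempted" */

/* Decode a prev_state value as recorded by the sched_switch event. */
static void print_prev_state(long prev_state)
{
	long state = prev_state & (TASK_STATE_MAX - 1);

	if (state & TASK_INTERRUPTIBLE)
		printf("S");
	else if (state & TASK_UNINTERRUPTIBLE)
		printf("D");
	else
		printf("R");	/* runnable */

	/* Set only when the switch was a preemption (@preempt == true). */
	if (prev_state & TASK_STATE_MAX)
		printf("+");
	printf("\n");
}

int main(void)
{
	print_prev_state(TASK_RUNNING | TASK_STATE_MAX);	/* prints "R+" */
	print_prev_state(TASK_INTERRUPTIBLE);			/* prints "S"  */
	return 0;
}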