path: root/kernel/sched/core.c
author	Oleg Nesterov <oleg@redhat.com>	2013-11-13 10:45:38 -0500
committer	Ingo Molnar <mingo@kernel.org>	2013-11-27 07:50:53 -0500
commit	192301e70af3f6803c6354a464ebfa742da738ae (patch)
tree	a13e157f7490e18ae043435be9db0d3f114bfa71 /kernel/sched/core.c
parent	86506a99a62400e9f7b7d1344bcc9ea235faf98f (diff)
sched: Check TASK_DEAD rather than EXIT_DEAD in schedule_debug()
schedule_debug() ignores in_atomic() if prev->exit_state != 0. This is not what we want: ->exit_state is set by exit_notify(), but we should complain until the task does its last schedule() in TASK_DEAD.

See also commit 7407251a0e2e ("PF_DEAD cleanup"); I think this ancient commit explains why schedule() had to rely on ->exit_state: until that commit, exit_notify() disabled preemption and set PF_DEAD, which was then used to detect the exiting task.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: David Laight <David.Laight@ACULAB.COM>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/20131113154538.GB15810@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
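To make the ordering concrete, here is a condensed sketch of the tail of do_exit() as described above. It is simplified for illustration and is not the verbatim kernel/exit.c of this era; the surrounding teardown is elided and names like group_dead follow the real code but are not declared here.

	/*
	 * Simplified sketch of the tail of do_exit(): ->exit_state is
	 * set well before the final schedule(), but TASK_DEAD only
	 * immediately before it, so the TASK_DEAD check keeps the
	 * scheduling-while-atomic warning live for longer.
	 */
	void do_exit(long code)
	{
		struct task_struct *tsk = current;

		/* ... bulk of the exit teardown ... */

		exit_notify(tsk, group_dead);	/* ->exit_state becomes non-zero here */

		/*
		 * More teardown follows; an in_atomic() schedule() in this
		 * window is still a bug, so schedule_debug() should keep
		 * complaining about it.
		 */

		preempt_disable();
		tsk->state = TASK_DEAD;		/* only now is the warning suppressed */
		schedule();			/* the final, deliberately atomic schedule() */
		BUG();				/* never returns */
	}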
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--	kernel/sched/core.c	6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 687985b0284e..19db8f3b0e3b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2414,10 +2414,10 @@ static inline void schedule_debug(struct task_struct *prev)
 {
 	/*
 	 * Test if we are atomic. Since do_exit() needs to call into
-	 * schedule() atomically, we ignore that path for now.
-	 * Otherwise, whine if we are scheduling when we should not be.
+	 * schedule() atomically, we ignore that path. Otherwise whine
+	 * if we are scheduling when we should not.
 	 */
-	if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
+	if (unlikely(in_atomic_preempt_off() && prev->state != TASK_DEAD))
 		__schedule_bug(prev);
 	rcu_sleep_check();
 
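For reference on the unchanged half of the test: in kernels of this vintage, in_atomic_preempt_off() was defined in include/linux/hardirq.h roughly as follows (quoted from memory, so treat it as an approximation rather than the exact source):

	/*
	 * Check whether we were already atomic before schedule()'s own
	 * preempt_disable(): the offset of 1 accounts for exactly that
	 * one preemption disable.
	 */
	#define PREEMPT_CHECK_OFFSET	1
	#define in_atomic_preempt_off() \
		(preempt_count() != PREEMPT_CHECK_OFFSET)

So the warning fires whenever anything beyond schedule()'s own preempt_disable() still holds the CPU in atomic context, unless, after this patch, the task is performing its final TASK_DEAD schedule().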