author	Thomas Gleixner <tglx@linutronix.de>	2011-06-22 13:47:01 -0400
committer	Greg Kroah-Hartman <gregkh@suse.de>	2011-10-03 14:40:09 -0400
commit	f4e97b682ac92d2aed3db68f396b022113d9ad30 (patch)
tree	fcaf4fb3e45f8a89313b003a10765f1dc97cbbaa	/kernel/sched.c
parent	edbb7ce79e62d1028781b58337100108dc41471e (diff)
sched: Move blk_schedule_flush_plug() out of __schedule()
commit 9c40cef2b799f9b5e7fa5de4d2ad3a0168ba118c upstream.

There is no real reason to run blk_schedule_flush_plug() with interrupts
and preemption disabled. Move it into schedule() and call it when the
task is going voluntarily to sleep. There might be false positives when
the task is woken between that call and actually scheduling, but that's
not really different from being woken immediately after switching away.

This fixes a deadlock in the scheduler where the
blk_schedule_flush_plug() callchain enables interrupts and thereby
allows a wakeup to happen of the task that's going to sleep.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Tejun Heo <tj@kernel.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/n/tip-dwfxtra7yg1b5r65m32ywtct@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
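For orientation, the scheduler entry path reads as follows once the two hunks below are applied. This is a sketch reconstructed from this diff, not copied from a full tree; blk_needs_flush_plug() and blk_schedule_flush_plug() are the block-layer helpers declared in include/linux/blkdev.h and are assumed to be in scope.

static inline void sched_submit_work(struct task_struct *tsk)
{
	/* A task that is still TASK_RUNNING is not going to sleep,
	 * so there is nothing to submit on its behalf. */
	if (!tsk->state)
		return;
	/*
	 * If we are going to sleep and we have plugged IO queued,
	 * make sure to submit it to avoid deadlocks.
	 */
	if (blk_needs_flush_plug(tsk))
		blk_schedule_flush_plug(tsk);
}

asmlinkage void schedule(void)
{
	struct task_struct *tsk = current;

	/* Flush plugged IO while interrupts and preemption are still
	 * enabled, before __schedule() disables interrupts and takes
	 * rq->lock. The flush path may enable interrupts and trigger
	 * wakeups, which is harmless at this point. */
	sched_submit_work(tsk);
	__schedule();
}
EXPORT_SYMBOL(schedule);

A wakeup that slips in between sched_submit_work() and __schedule() is handled like any other wakeup racing with going to sleep, which is why the commit message treats such false positives as harmless.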
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	25
1 file changed, 15 insertions(+), 10 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 64f93d18e4d..ce2a3406ec0 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4285,16 +4285,6 @@ need_resched:
 				if (to_wakeup)
 					try_to_wake_up_local(to_wakeup);
 			}
-
-			/*
-			 * If we are going to sleep and we have plugged IO
-			 * queued, make sure to submit it to avoid deadlocks.
-			 */
-			if (blk_needs_flush_plug(prev)) {
-				raw_spin_unlock(&rq->lock);
-				blk_schedule_flush_plug(prev);
-				raw_spin_lock(&rq->lock);
-			}
 		}
 		switch_count = &prev->nvcsw;
 	}
@@ -4333,8 +4323,23 @@ need_resched:
 	goto need_resched;
 }
 
+static inline void sched_submit_work(struct task_struct *tsk)
+{
+	if (!tsk->state)
+		return;
+	/*
+	 * If we are going to sleep and we have plugged IO queued,
+	 * make sure to submit it to avoid deadlocks.
+	 */
+	if (blk_needs_flush_plug(tsk))
+		blk_schedule_flush_plug(tsk);
+}
+
 asmlinkage void schedule(void)
 {
+	struct task_struct *tsk = current;
+
+	sched_submit_work(tsk);
 	__schedule();
 }
 EXPORT_SYMBOL(schedule);