aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorThomas Gleixner <tglx@linutronix.de>2011-06-22 13:47:01 -0400
committerIngo Molnar <mingo@elte.hu>2011-08-29 06:26:59 -0400
commit9c40cef2b799f9b5e7fa5de4d2ad3a0168ba118c (patch)
treecb7a48eb2a6d30b90144a55bc6bb82caacfc9622 /kernel
parentc259e01a1ec90063042f758e409cd26b2a0963c8 (diff)
sched: Move blk_schedule_flush_plug() out of __schedule()
There is no real reason to run blk_schedule_flush_plug() with interrupts and preemption disabled. Move it into schedule() and call it when the task is going voluntarily to sleep. There might be false positives when the task is woken between that call and actually scheduling, but that's not really different from being woken immediately after switching away.

This fixes a deadlock in the scheduler where the blk_schedule_flush_plug() callchain enables interrupts and thereby allows a wakeup to happen of the task that's going to sleep.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Tejun Heo <tj@kernel.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: stable@kernel.org # 2.6.39+
Link: http://lkml.kernel.org/n/tip-dwfxtra7yg1b5r65m32ywtct@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/sched.c25
1 file changed, 15 insertions(+), 10 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index ec15e8129cf7..511732c39b6e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4322,16 +4322,6 @@ need_resched:
 				if (to_wakeup)
 					try_to_wake_up_local(to_wakeup);
 			}
-
-			/*
-			 * If we are going to sleep and we have plugged IO
-			 * queued, make sure to submit it to avoid deadlocks.
-			 */
-			if (blk_needs_flush_plug(prev)) {
-				raw_spin_unlock(&rq->lock);
-				blk_schedule_flush_plug(prev);
-				raw_spin_lock(&rq->lock);
-			}
 		}
 		switch_count = &prev->nvcsw;
 	}
@@ -4370,8 +4360,23 @@ need_resched:
 		goto need_resched;
 }
 
+static inline void sched_submit_work(struct task_struct *tsk)
+{
+	if (!tsk->state)
+		return;
+	/*
+	 * If we are going to sleep and we have plugged IO queued,
+	 * make sure to submit it to avoid deadlocks.
+	 */
+	if (blk_needs_flush_plug(tsk))
+		blk_schedule_flush_plug(tsk);
+}
+
 asmlinkage void schedule(void)
 {
+	struct task_struct *tsk = current;
+
+	sched_submit_work(tsk);
 	__schedule();
 }
 EXPORT_SYMBOL(schedule);