about summary refs log tree commit diff stats
path: root/kernel/sched.c
diff options
context:
space:
mode:
authorPeter Zijlstra <a.p.zijlstra@chello.nl>2011-04-05 11:23:50 -0400
committerIngo Molnar <mingo@elte.hu>2011-04-14 02:52:37 -0400
commit2acca55ed98ad9b9aa25e7e587ebe306c0313dc7 (patch)
treeb06fcc57dc284ccf8dcbda90370fd08250887421 /kernel/sched.c
parenta8e4f2eaecc9bfa4954adf79a04f4f22fddd829c (diff)
sched: Also serialize ttwu_local() with p->pi_lock
Since we now serialize ttwu() using p->pi_lock, we also need to serialize ttwu_local() using that, otherwise, once we drop the rq->lock from ttwu() it can race with ttwu_local().

Reviewed-by: Frank Rowand <frank.rowand@am.sony.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20110405152729.192366907@chello.nl
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  31
1 file changed, 19 insertions, 12 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index fd32b78c123c..6b269b79c52c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2566,9 +2566,9 @@ out:
2566 * try_to_wake_up_local - try to wake up a local task with rq lock held 2566 * try_to_wake_up_local - try to wake up a local task with rq lock held
2567 * @p: the thread to be awakened 2567 * @p: the thread to be awakened
2568 * 2568 *
2569 * Put @p on the run-queue if it's not already there. The caller must 2569 * Put @p on the run-queue if it's not already there. The caller must
2570 * ensure that this_rq() is locked, @p is bound to this_rq() and not 2570 * ensure that this_rq() is locked, @p is bound to this_rq() and not
2571 * the current task. this_rq() stays locked over invocation. 2571 * the current task.
2572 */ 2572 */
2573static void try_to_wake_up_local(struct task_struct *p) 2573static void try_to_wake_up_local(struct task_struct *p)
2574{ 2574{
@@ -2578,14 +2578,22 @@ static void try_to_wake_up_local(struct task_struct *p)
2578 BUG_ON(p == current); 2578 BUG_ON(p == current);
2579 lockdep_assert_held(&rq->lock); 2579 lockdep_assert_held(&rq->lock);
2580 2580
2581 if (!raw_spin_trylock(&p->pi_lock)) {
2582 raw_spin_unlock(&rq->lock);
2583 raw_spin_lock(&p->pi_lock);
2584 raw_spin_lock(&rq->lock);
2585 }
2586
2581 if (!(p->state & TASK_NORMAL)) 2587 if (!(p->state & TASK_NORMAL))
2582 return; 2588 goto out;
2583 2589
2584 if (!p->on_rq) 2590 if (!p->on_rq)
2585 ttwu_activate(rq, p, ENQUEUE_WAKEUP); 2591 ttwu_activate(rq, p, ENQUEUE_WAKEUP);
2586 2592
2587 ttwu_post_activation(p, rq, 0); 2593 ttwu_post_activation(p, rq, 0);
2588 ttwu_stat(rq, p, smp_processor_id(), 0); 2594 ttwu_stat(rq, p, smp_processor_id(), 0);
2595out:
2596 raw_spin_unlock(&p->pi_lock);
2589} 2597}
2590 2598
2591/** 2599/**
@@ -4114,11 +4122,13 @@ need_resched:
4114 if (unlikely(signal_pending_state(prev->state, prev))) { 4122 if (unlikely(signal_pending_state(prev->state, prev))) {
4115 prev->state = TASK_RUNNING; 4123 prev->state = TASK_RUNNING;
4116 } else { 4124 } else {
4125 deactivate_task(rq, prev, DEQUEUE_SLEEP);
4126 prev->on_rq = 0;
4127
4117 /* 4128 /*
4118 * If a worker is going to sleep, notify and 4129 * If a worker went to sleep, notify and ask workqueue
4119 * ask workqueue whether it wants to wake up a 4130 * whether it wants to wake up a task to maintain
4120 * task to maintain concurrency. If so, wake 4131 * concurrency.
4121 * up the task.
4122 */ 4132 */
4123 if (prev->flags & PF_WQ_WORKER) { 4133 if (prev->flags & PF_WQ_WORKER) {
4124 struct task_struct *to_wakeup; 4134 struct task_struct *to_wakeup;
@@ -4128,12 +4138,9 @@ need_resched:
4128 try_to_wake_up_local(to_wakeup); 4138 try_to_wake_up_local(to_wakeup);
4129 } 4139 }
4130 4140
4131 deactivate_task(rq, prev, DEQUEUE_SLEEP);
4132 prev->on_rq = 0;
4133
4134 /* 4141 /*
4135 * If we are going to sleep and we have plugged IO queued, make 4142 * If we are going to sleep and we have plugged IO
4136 * sure to submit it to avoid deadlocks. 4143 * queued, make sure to submit it to avoid deadlocks.
4137 */ 4144 */
4138 if (blk_needs_flush_plug(prev)) { 4145 if (blk_needs_flush_plug(prev)) {
4139 raw_spin_unlock(&rq->lock); 4146 raw_spin_unlock(&rq->lock);