author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2011-04-05 11:23:52 -0400
committer  Ingo Molnar <mingo@elte.hu>                2011-04-14 02:52:38 -0400
commit     ab2515c4b98f7bc4fa11cad9fa0f811d63a72a26 (patch)
tree       cf70c60583a3e3dd5e6cb47193e3db9a4620608b /kernel/sched.c
parent     0122ec5b02f766c355b3168df53a6c038a24fa0d (diff)
sched: Drop rq->lock from first part of wake_up_new_task()
Since p->pi_lock now protects all things needed to call
select_task_rq(), avoid the double remote rq->lock acquisition and
rely on p->pi_lock.

Reviewed-by: Frank Rowand <frank.rowand@am.sony.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110405152729.273362517@chello.nl
Signed-off-by: Ingo Molnar <mingo@elte.hu>
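To make the locking change concrete: before this patch, wake_up_new_task() took the task's current (possibly remote) rq->lock merely to call select_task_rq(), then took the destination rq->lock a second time to enqueue; with p->pi_lock covering everything select_task_rq() reads, the first acquisition can go. The userspace sketch below models that before/after pattern with pthread mutexes. It is an illustration only, not kernel code: select_cpu(), wake_new_before() and wake_new_after() are hypothetical stand-ins for select_task_rq() and the two shapes of wake_up_new_task(), and the build line is an assumption.

/* build (assumption): cc -pthread sketch.c -o sketch */
#include <pthread.h>
#include <stdio.h>

struct rq {                             /* stand-in for a per-CPU runqueue */
        pthread_mutex_t lock;
        int nr_running;
};

struct task {                           /* stand-in for task_struct */
        pthread_mutex_t pi_lock;        /* models p->pi_lock */
        int cpu;                        /* runqueue the task is (to be) queued on */
};

static struct rq runqueues[2] = {
        { PTHREAD_MUTEX_INITIALIZER, 0 },
        { PTHREAD_MUTEX_INITIALIZER, 0 },
};

/* hypothetical placement policy, standing in for select_task_rq() */
static int select_cpu(void)
{
        return runqueues[0].nr_running <= runqueues[1].nr_running ? 0 : 1;
}

/*
 * Pre-patch shape: the task's current (possibly remote) rq->lock is
 * taken just to make the placement decision, then dropped, then the
 * destination rq->lock is taken to enqueue -- two rq acquisitions.
 */
static void wake_new_before(struct task *p)
{
        int oldcpu;

        pthread_mutex_lock(&p->pi_lock);

        oldcpu = p->cpu;
        pthread_mutex_lock(&runqueues[oldcpu].lock);    /* remote rq->lock */
        p->cpu = select_cpu();
        pthread_mutex_unlock(&runqueues[oldcpu].lock);

        pthread_mutex_lock(&runqueues[p->cpu].lock);    /* destination rq->lock */
        runqueues[p->cpu].nr_running++;
        pthread_mutex_unlock(&runqueues[p->cpu].lock);

        pthread_mutex_unlock(&p->pi_lock);
}

/*
 * Post-patch shape: pi_lock alone covers the placement decision, so
 * only the destination rq->lock is ever taken.
 */
static void wake_new_after(struct task *p)
{
        pthread_mutex_lock(&p->pi_lock);

        p->cpu = select_cpu();                          /* no rq->lock needed */

        pthread_mutex_lock(&runqueues[p->cpu].lock);    /* destination rq->lock */
        runqueues[p->cpu].nr_running++;
        pthread_mutex_unlock(&runqueues[p->cpu].lock);

        pthread_mutex_unlock(&p->pi_lock);
}

int main(void)
{
        struct task t1 = { PTHREAD_MUTEX_INITIALIZER, 0 };
        struct task t2 = { PTHREAD_MUTEX_INITIALIZER, 0 };

        wake_new_before(&t1);
        wake_new_after(&t2);
        printf("t1 on cpu %d, t2 on cpu %d\n", t1.cpu, t2.cpu);
        return 0;
}

In the post-patch shape a remote runqueue is never touched until the task is actually queued there, which is the "avoid the double remote rq->lock acquisition" point of the commit message.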
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--    kernel/sched.c    17
1 file changed, 3 insertions(+), 14 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index f1551271a685..7c5494dccd39 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2736,28 +2736,18 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 {
 	unsigned long flags;
 	struct rq *rq;
-	int cpu __maybe_unused = get_cpu();
 
+	raw_spin_lock_irqsave(&p->pi_lock, flags);
 #ifdef CONFIG_SMP
-	rq = task_rq_lock(p, &flags);
-	p->state = TASK_WAKING;
-
 	/*
 	 * Fork balancing, do it here and not earlier because:
 	 *  - cpus_allowed can change in the fork path
 	 *  - any previously selected cpu might disappear through hotplug
-	 *
-	 * We set TASK_WAKING so that select_task_rq() can drop rq->lock
-	 * without people poking at ->cpus_allowed.
 	 */
-	cpu = select_task_rq(p, SD_BALANCE_FORK, 0);
-	set_task_cpu(p, cpu);
-
-	p->state = TASK_RUNNING;
-	task_rq_unlock(rq, p, &flags);
+	set_task_cpu(p, select_task_rq(p, SD_BALANCE_FORK, 0));
 #endif
 
-	rq = task_rq_lock(p, &flags);
+	rq = __task_rq_lock(p);
 	activate_task(rq, p, 0);
 	p->on_rq = 1;
 	trace_sched_wakeup_new(p, true);
@@ -2767,7 +2757,6 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 		p->sched_class->task_woken(rq, p);
 #endif
 	task_rq_unlock(rq, p, &flags);
-	put_cpu();
 }
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
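For reference, this is how the first part of wake_up_new_task() reads with the patch applied, reconstructed from the hunks above. The lines between the two hunks are not part of this diff; the check_preempt_curr() call and the task_woken guard shown there are filled in from the surrounding sched.c of that era and should be read as assumed context, not as part of the change.

void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
{
	unsigned long flags;
	struct rq *rq;

	/* p->pi_lock is now sufficient to call select_task_rq() */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
#ifdef CONFIG_SMP
	/*
	 * Fork balancing, do it here and not earlier because:
	 *  - cpus_allowed can change in the fork path
	 *  - any previously selected cpu might disappear through hotplug
	 */
	set_task_cpu(p, select_task_rq(p, SD_BALANCE_FORK, 0));
#endif

	rq = __task_rq_lock(p);			/* pi_lock is already held */
	activate_task(rq, p, 0);
	p->on_rq = 1;
	trace_sched_wakeup_new(p, true);
	check_preempt_curr(rq, p, WF_FORK);	/* assumed between-hunk context */
#ifdef CONFIG_SMP
	if (p->sched_class->task_woken)		/* assumed between-hunk context */
		p->sched_class->task_woken(rq, p);
#endif
	task_rq_unlock(rq, p, &flags);		/* drops rq->lock and p->pi_lock */
}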