author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2011-04-05 11:23:53 -0400
committer	Ingo Molnar <mingo@elte.hu>	2011-04-14 02:52:39 -0400
commit		8f42ced974df7d5af2de4cf5ea21fe978c7e4478 (patch)
tree		514ee978491dc62ee727308be1f228987c47eb54 /kernel/sched.c
parent		ab2515c4b98f7bc4fa11cad9fa0f811d63a72a26 (diff)
sched: Drop rq->lock from sched_exec()
Since we can now call select_task_rq() and set_task_cpu() with only
p->pi_lock held, and sched_exec() load-balancing has always been
optimistic, drop all rq->lock usage.
Oleg also noted that need_migrate_task() will always be true for
current, so don't bother calling that at all.
Reviewed-by: Frank Rowand <frank.rowand@am.sony.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110405152729.314204889@chello.nl
Signed-off-by: Ingo Molnar <mingo@elte.hu>
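
For orientation when reading the diff below: the actual migration is delegated to the cpu_stop machinery, whose pieces live elsewhere in kernel/sched.c at this point in the tree. A minimal sketch of those pieces for context (they are not part of this patch, so treat the exact bodies as approximate):

	/* Argument block passed from sched_exec() to the stopper callback. */
	struct migration_arg {
		struct task_struct *task;	/* task to migrate (current, here) */
		int dest_cpu;			/* CPU picked by select_task_rq() */
	};

	/*
	 * Runs on the high-priority stopper thread of the task's CPU.
	 * __migrate_task() re-validates ->cpus_allowed under the proper
	 * runqueue locks before moving anything, which is what lets
	 * sched_exec() stay optimistic and skip those checks itself.
	 */
	static int migration_cpu_stop(void *data)
	{
		struct migration_arg *arg = data;

		/* Interrupts off: we may operate on a remote runqueue. */
		local_irq_disable();
		__migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
		local_irq_enable();
		return 0;
	}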
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c | 15 +++++----------
1 file changed, 5 insertions(+), 10 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 7c5494dccd39..1be1a09b9dc9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3465,27 +3465,22 @@ void sched_exec(void)
 {
 	struct task_struct *p = current;
 	unsigned long flags;
-	struct rq *rq;
 	int dest_cpu;
 
-	rq = task_rq_lock(p, &flags);
+	raw_spin_lock_irqsave(&p->pi_lock, flags);
 	dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0);
 	if (dest_cpu == smp_processor_id())
 		goto unlock;
 
-	/*
-	 * select_task_rq() can race against ->cpus_allowed
-	 */
-	if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed) &&
-	    likely(cpu_active(dest_cpu)) && need_migrate_task(p)) {
+	if (likely(cpu_active(dest_cpu))) {
 		struct migration_arg arg = { p, dest_cpu };
 
-		task_rq_unlock(rq, p, &flags);
-		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
+		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+		stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
 		return;
 	}
 unlock:
-	task_rq_unlock(rq, p, &flags);
+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 }
 
 #endif
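
Putting the hunk together, sched_exec() after this patch reads as follows. This is reconstructed directly from the diff above; the comments are added annotations, not kernel source:

	void sched_exec(void)
	{
		struct task_struct *p = current;
		unsigned long flags;
		int dest_cpu;

		/* p->pi_lock alone is now enough to call select_task_rq(). */
		raw_spin_lock_irqsave(&p->pi_lock, flags);
		dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0);
		if (dest_cpu == smp_processor_id())
			goto unlock;

		/*
		 * Placement stays optimistic: no ->cpus_allowed or
		 * need_migrate_task() check here; migration_cpu_stop()
		 * re-validates affinity under the proper locks.
		 */
		if (likely(cpu_active(dest_cpu))) {
			struct migration_arg arg = { p, dest_cpu };

			raw_spin_unlock_irqrestore(&p->pi_lock, flags);
			stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
			return;
		}
	unlock:
		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
	}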