author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2011-04-05 11:23:46 -0400
committer	Ingo Molnar <mingo@elte.hu>	2011-04-14 02:52:36 -0400
commit		7608dec2ce2004c234339bef8c8074e5e601d0e9 (patch)
tree		a855754a4fa3de6fe0d287c9d94d58d7bd6e8978 /kernel/sched.c
parent		013fdb8086acaae5f8eb96f9ad48fcd98882ac46 (diff)
sched: Drop the rq argument to sched_class::select_task_rq()
In preparation for calling select_task_rq() without rq->lock held, drop
the dependency on the rq argument.
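The matching change to the sched_class hook itself lives outside this
kernel/sched.c-only diffstat (in the sched_class definition and the
per-class implementations). As a minimal sketch, assuming that companion
change and using the parameter names from the select_task_rq() wrapper
below, the before/after shape of the hook is roughly:

/*
 * Illustrative sketch only; sched_class_sketch is a stand-in type,
 * not part of this patch.
 */
struct task_struct;

struct sched_class_sketch {
	/*
	 * Before: an rq pointer was passed in, so picking a CPU implied
	 * a stable rq and, in practice, rq->lock being held:
	 *
	 *	int (*select_task_rq)(struct rq *rq, struct task_struct *p,
	 *			      int sd_flags, int wake_flags);
	 */

	/*
	 * After: only the task is needed, so the hook no longer depends
	 * on the caller holding rq->lock.
	 */
	int (*select_task_rq)(struct task_struct *p, int sd_flags,
			      int wake_flags);
};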
Reviewed-by: Frank Rowand <frank.rowand@am.sony.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110405152729.031077745@chello.nl
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	20
1 file changed, 11 insertions(+), 9 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index d398f2f0a3c9..d4b815d345b3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2195,13 +2195,15 @@ static int migration_cpu_stop(void *data);
  * The task's runqueue lock must be held.
  * Returns true if you have to wait for migration thread.
  */
-static bool migrate_task(struct task_struct *p, struct rq *rq)
+static bool need_migrate_task(struct task_struct *p)
 {
 	/*
 	 * If the task is not on a runqueue (and not running), then
 	 * the next wake-up will properly place the task.
 	 */
-	return p->on_rq || task_running(rq, p);
+	bool running = p->on_rq || p->on_cpu;
+	smp_rmb(); /* finish_lock_switch() */
+	return running;
 }
 
 /*
@@ -2376,9 +2378,9 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
  * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
  */
 static inline
-int select_task_rq(struct rq *rq, struct task_struct *p, int sd_flags, int wake_flags)
+int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
 {
-	int cpu = p->sched_class->select_task_rq(rq, p, sd_flags, wake_flags);
+	int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);
 
 	/*
 	 * In order not to call set_task_cpu() on a blocking task we need
@@ -2533,7 +2535,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 		en_flags |= ENQUEUE_WAKING;
 	}
 
-	cpu = select_task_rq(rq, p, SD_BALANCE_WAKE, wake_flags);
+	cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
 	if (cpu != orig_cpu)
 		set_task_cpu(p, cpu);
 	__task_rq_unlock(rq);
@@ -2744,7 +2746,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 	 * We set TASK_WAKING so that select_task_rq() can drop rq->lock
 	 * without people poking at ->cpus_allowed.
 	 */
-	cpu = select_task_rq(rq, p, SD_BALANCE_FORK, 0);
+	cpu = select_task_rq(p, SD_BALANCE_FORK, 0);
 	set_task_cpu(p, cpu);
 
 	p->state = TASK_RUNNING;
@@ -3474,7 +3476,7 @@ void sched_exec(void)
 	int dest_cpu;
 
 	rq = task_rq_lock(p, &flags);
-	dest_cpu = p->sched_class->select_task_rq(rq, p, SD_BALANCE_EXEC, 0);
+	dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0);
 	if (dest_cpu == smp_processor_id())
 		goto unlock;
 
@@ -3482,7 +3484,7 @@ void sched_exec(void)
 	 * select_task_rq() can race against ->cpus_allowed
 	 */
 	if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed) &&
-	    likely(cpu_active(dest_cpu)) && migrate_task(p, rq)) {
+	    likely(cpu_active(dest_cpu)) && need_migrate_task(p)) {
 		struct migration_arg arg = { p, dest_cpu };
 
 		task_rq_unlock(rq, &flags);
@@ -5911,7 +5913,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 		goto out;
 
 	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
-	if (migrate_task(p, rq)) {
+	if (need_migrate_task(p)) {
 		struct migration_arg arg = { p, dest_cpu };
 		/* Need help from migration thread: drop lock and wait. */
 		__task_rq_unlock(rq);