author     Nikanth Karthikesan <knikanth@suse.de>    2010-11-26 02:07:09 -0500
committer  Ingo Molnar <mingo@elte.hu>               2010-11-26 09:05:36 -0500
commit     b7a2b39d9b7703ccf068f549c8dc3465fc41d015 (patch)
tree       6890598daf8e4148a2a9ff085a6cac1f71b24c74 /kernel
parent     335d7afbfb71faac833734a94240c1e07cf0ead8 (diff)
sched: Remove unused argument dest_cpu to migrate_task()
Remove the unused 'dest_cpu' argument of migrate_task() and pass the
runqueue instead, as it is always known at the call site.
Signed-off-by: Nikanth Karthikesan <knikanth@suse.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <201011261237.09187.knikanth@suse.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
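The pattern behind the patch is general: when every caller of a helper has
already looked up a value that the helper re-derives, pass the value in and
drop the redundant lookup along with any argument that is no longer used.
The sketch below is a minimal userspace illustration of that shape, not the
kernel code; the struct fields, the task_rq() stand-in, and the return
predicate are invented for the example.

#include <stdbool.h>
#include <stdio.h>

/*
 * Simplified stand-ins for the kernel types; every field below is
 * invented for illustration only.
 */
struct rq { int cpu; int nr_running; };
struct task_struct { struct rq *rq; bool on_rq; };

static struct rq *task_rq(struct task_struct *p) { return p->rq; }

/*
 * Before the patch: dest_cpu is accepted but never used, and the
 * runqueue is re-derived even though every caller already has it.
 */
static bool migrate_task_old(struct task_struct *p, int dest_cpu)
{
        struct rq *rq = task_rq(p);
        (void)dest_cpu;                 /* unused argument */
        /* placeholder predicate for the real on-runqueue/running check */
        return p->on_rq || rq->nr_running > 0;
}

/*
 * After the patch: the caller passes the rq it already holds and the
 * unused parameter is gone.
 */
static bool migrate_task_new(struct task_struct *p, struct rq *rq)
{
        return p->on_rq || rq->nr_running > 0;
}

int main(void)
{
        struct rq rq = { .cpu = 1, .nr_running = 0 };
        struct task_struct p = { .rq = &rq, .on_rq = true };

        /* The call site already looked up rq, so it simply passes it on. */
        printf("old: %d, new: %d\n",
               migrate_task_old(&p, 2), migrate_task_new(&p, &rq));
        return 0;
}

Beyond saving a task_rq() lookup, the new signature also makes the locking
contract in the comment above migrate_task() ("The task's runqueue lock must
be held") more explicit: the caller hands over the very runqueue it holds.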
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c  8
1 file changed, 3 insertions, 5 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index abe7aec55763..35a6373f1265 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2061,10 +2061,8 @@ static int migration_cpu_stop(void *data);
  * The task's runqueue lock must be held.
  * Returns true if you have to wait for migration thread.
  */
-static bool migrate_task(struct task_struct *p, int dest_cpu)
+static bool migrate_task(struct task_struct *p, struct rq *rq)
 {
-        struct rq *rq = task_rq(p);
-
         /*
          * If the task is not on a runqueue (and not running), then
          * the next wake-up will properly place the task.
@@ -3224,7 +3222,7 @@ void sched_exec(void)
          * select_task_rq() can race against ->cpus_allowed
          */
         if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed) &&
-            likely(cpu_active(dest_cpu)) && migrate_task(p, dest_cpu)) {
+            likely(cpu_active(dest_cpu)) && migrate_task(p, rq)) {
                 struct migration_arg arg = { p, dest_cpu };

                 task_rq_unlock(rq, &flags);
@@ -5504,7 +5502,7 @@ again:
                 goto out;

         dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
-        if (migrate_task(p, dest_cpu)) {
+        if (migrate_task(p, rq)) {
                 struct migration_arg arg = { p, dest_cpu };
                 /* Need help from migration thread: drop lock and wait. */
                 task_rq_unlock(rq, &flags);