author | Rusty Russell <rusty@rustcorp.com.au> | 2008-11-24 11:05:03 -0500
committer | Ingo Molnar <mingo@elte.hu> | 2008-11-24 11:50:17 -0500
commit | 1e5ce4f4a755ee498bd9217dae26143afa0d8f31 (patch)
tree | 58794312cf975daa27e8b8f7fa3cc4f65555f6b2 /kernel/sched.c
parent | 3404c8d97c2d3eb87b1bf4aadad957bfb5235b14 (diff)
sched: remove any_online_cpu()
Impact: use new API
any_online_cpu() is a good name, but it takes a cpumask_t, not a
pointer.
There are several places where any_online_cpu() doesn't really want a
mask arg at all. Replace all callers with cpumask_any() and
cpumask_any_and().
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
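For readers coming to this from the new-API side, here is a minimal user-space sketch of what the conversion preserves. It is an illustration only, not kernel code: the toy_mask type, the example online bitmap, and the toy_* helpers are all invented here to model the semantics. any_online_cpu(mask) returned the lowest-numbered CPU set in both 'mask' and the online map (or a value >= nr_cpu_ids when there is none); cpumask_any_and(cpu_online_mask, &mask) spells out the same lookup with the online mask passed explicitly and the task mask passed by pointer, while cpumask_any(cpu_online_mask) covers the callers that never needed a restricting mask at all.

#include <stdio.h>

#define NR_CPUS 8			/* toy value; the kernel's is config-dependent */
typedef unsigned int toy_mask;		/* toy stand-in for struct cpumask / cpumask_t */

static const toy_mask cpu_online_bits = 0x2d;	/* example: CPUs 0, 2, 3 and 5 online */

/* Lowest CPU set in both masks; NR_CPUS means "none" (cf. >= nr_cpu_ids). */
static int toy_cpumask_any_and(toy_mask a, toy_mask b)
{
	toy_mask both = a & b;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (both & (1u << cpu))
			return cpu;
	return NR_CPUS;
}

/* The unrestricted case: picking "any" CPU is an AND with an all-ones mask. */
static int toy_cpumask_any(toy_mask m)
{
	return toy_cpumask_any_and(m, ~0u);
}

int main(void)
{
	toy_mask cpus_allowed = 0x18;	/* example: task allowed on CPUs 3 and 4 */
	int dest;

	/* Old style: dest = any_online_cpu(cpus_allowed);                    */
	/* New style: dest = cpumask_any_and(cpu_online_mask, &cpus_allowed); */
	dest = toy_cpumask_any_and(cpu_online_bits, cpus_allowed);

	if (dest >= NR_CPUS)
		printf("no online CPU in the allowed mask\n");
	else
		printf("dest_cpu = %d\n", dest);	/* prints 3 */

	/* And the callers that want no restriction: */
	printf("any online cpu = %d\n", toy_cpumask_any(cpu_online_bits));	/* prints 0 */
	return 0;
}

As implemented in the cpumask rework of this era, cpumask_any() and cpumask_any_and() reduce to first-set-bit lookups (cpumask_first()/cpumask_first_and()), so the substitutions in this patch change the calling convention, not which CPU gets picked.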
Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 14
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 0dc9d5752d68..a2de33d05340 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5964,7 +5964,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask)
 	if (cpu_isset(task_cpu(p), *new_mask))
 		goto out;
 
-	if (migrate_task(p, any_online_cpu(*new_mask), &req)) {
+	if (migrate_task(p, cpumask_any_and(cpu_online_mask, new_mask), &req)) {
 		/* Need help from migration thread: drop lock and wait. */
 		task_rq_unlock(rq, &flags);
 		wake_up_process(rq->migration_thread);
@@ -6113,11 +6113,12 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 	node_to_cpumask_ptr(pnodemask, cpu_to_node(dead_cpu));
 
 	cpus_and(mask, *pnodemask, p->cpus_allowed);
-	dest_cpu = any_online_cpu(mask);
+	dest_cpu = cpumask_any_and(cpu_online_mask, &mask);
 
 	/* On any allowed CPU? */
 	if (dest_cpu >= nr_cpu_ids)
-		dest_cpu = any_online_cpu(p->cpus_allowed);
+		dest_cpu = cpumask_any_and(cpu_online_mask,
+					   &p->cpus_allowed);
 
 	/* No more Mr. Nice Guy. */
 	if (dest_cpu >= nr_cpu_ids) {
@@ -6133,7 +6134,8 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 	 */
 	rq = task_rq_lock(p, &flags);
 	p->cpus_allowed = cpus_allowed;
-	dest_cpu = any_online_cpu(p->cpus_allowed);
+	dest_cpu = cpumask_any_and(cpu_online_mask,
+				   &p->cpus_allowed);
 	task_rq_unlock(rq, &flags);
 
 	/*
@@ -6159,7 +6161,7 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
  */
 static void migrate_nr_uninterruptible(struct rq *rq_src)
 {
-	struct rq *rq_dest = cpu_rq(any_online_cpu(*CPU_MASK_ALL_PTR));
+	struct rq *rq_dest = cpu_rq(cpumask_any(cpu_online_mask));
 	unsigned long flags;
 
 	local_irq_save(flags);
@@ -6524,7 +6526,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 			break;
 		/* Unbind it from offline cpu so it can run. Fall thru. */
 		kthread_bind(cpu_rq(cpu)->migration_thread,
-			     any_online_cpu(cpu_online_map));
+			     cpumask_any(cpu_online_mask));
 		kthread_stop(cpu_rq(cpu)->migration_thread);
 		cpu_rq(cpu)->migration_thread = NULL;
 		break;