diff options
author | Mike Travis <travis@sgi.com> | 2008-03-26 17:23:49 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-04-19 13:44:59 -0400 |
commit | cd8ba7cd9be0192348c2836cb6645d9b2cd2bfd2 (patch) | |
tree | 3b4138c7b683c2168ac13be41aab74b49a6bcf1c /kernel/sched.c | |
parent | e0982e90cd1ecf59818b137386b7f63debded9cc (diff) |
sched: add new set_cpus_allowed_ptr function
Add a new function that accepts a pointer to the "newly allowed cpus"
cpumask argument.
int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask)
The current set_cpus_allowed() function is modified to use the above,
but this does not result in an ABI change. With some help from compiler
optimization, it may not introduce any additional overhead.
Additionally, to enforce the read only nature of the new_mask arg, the
"const" property is migrated to sub-functions called by set_cpus_allowed.
This silences compiler warnings.
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 16 |
1 file changed, 8 insertions, 8 deletions
diff --git a/kernel/sched.c b/kernel/sched.c index 6ab0fcbf26e9..521b89b01480 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -5486,7 +5486,7 @@ static inline void sched_init_granularity(void) | |||
5486 | * task must not exit() & deallocate itself prematurely. The | 5486 | * task must not exit() & deallocate itself prematurely. The |
5487 | * call is not atomic; no spinlocks may be held. | 5487 | * call is not atomic; no spinlocks may be held. |
5488 | */ | 5488 | */ |
5489 | int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) | 5489 | int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask) |
5490 | { | 5490 | { |
5491 | struct migration_req req; | 5491 | struct migration_req req; |
5492 | unsigned long flags; | 5492 | unsigned long flags; |
@@ -5494,23 +5494,23 @@ int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) | |||
5494 | int ret = 0; | 5494 | int ret = 0; |
5495 | 5495 | ||
5496 | rq = task_rq_lock(p, &flags); | 5496 | rq = task_rq_lock(p, &flags); |
5497 | if (!cpus_intersects(new_mask, cpu_online_map)) { | 5497 | if (!cpus_intersects(*new_mask, cpu_online_map)) { |
5498 | ret = -EINVAL; | 5498 | ret = -EINVAL; |
5499 | goto out; | 5499 | goto out; |
5500 | } | 5500 | } |
5501 | 5501 | ||
5502 | if (p->sched_class->set_cpus_allowed) | 5502 | if (p->sched_class->set_cpus_allowed) |
5503 | p->sched_class->set_cpus_allowed(p, &new_mask); | 5503 | p->sched_class->set_cpus_allowed(p, new_mask); |
5504 | else { | 5504 | else { |
5505 | p->cpus_allowed = new_mask; | 5505 | p->cpus_allowed = *new_mask; |
5506 | p->rt.nr_cpus_allowed = cpus_weight(new_mask); | 5506 | p->rt.nr_cpus_allowed = cpus_weight(*new_mask); |
5507 | } | 5507 | } |
5508 | 5508 | ||
5509 | /* Can the task run on the task's current CPU? If so, we're done */ | 5509 | /* Can the task run on the task's current CPU? If so, we're done */ |
5510 | if (cpu_isset(task_cpu(p), new_mask)) | 5510 | if (cpu_isset(task_cpu(p), *new_mask)) |
5511 | goto out; | 5511 | goto out; |
5512 | 5512 | ||
5513 | if (migrate_task(p, any_online_cpu(new_mask), &req)) { | 5513 | if (migrate_task(p, any_online_cpu(*new_mask), &req)) { |
5514 | /* Need help from migration thread: drop lock and wait. */ | 5514 | /* Need help from migration thread: drop lock and wait. */ |
5515 | task_rq_unlock(rq, &flags); | 5515 | task_rq_unlock(rq, &flags); |
5516 | wake_up_process(rq->migration_thread); | 5516 | wake_up_process(rq->migration_thread); |
@@ -5523,7 +5523,7 @@ out: | |||
5523 | 5523 | ||
5524 | return ret; | 5524 | return ret; |
5525 | } | 5525 | } |
5526 | EXPORT_SYMBOL_GPL(set_cpus_allowed); | 5526 | EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); |
5527 | 5527 | ||
5528 | /* | 5528 | /* |
5529 | * Move (not current) task off this cpu, onto dest cpu. We're doing | 5529 | * Move (not current) task off this cpu, onto dest cpu. We're doing |