Diffstat (limited to 'kernel')

 kernel/sched.c    | 16 ++++++++--------
 kernel/sched_rt.c |  3 ++-
 2 files changed, 10 insertions(+), 9 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 6ab0fcbf26e9..521b89b01480 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5486,7 +5486,7 @@ static inline void sched_init_granularity(void)
  * task must not exit() & deallocate itself prematurely. The
  * call is not atomic; no spinlocks may be held.
  */
-int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
+int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask)
 {
 	struct migration_req req;
 	unsigned long flags;
@@ -5494,23 +5494,23 @@ int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 	int ret = 0;
 
 	rq = task_rq_lock(p, &flags);
-	if (!cpus_intersects(new_mask, cpu_online_map)) {
+	if (!cpus_intersects(*new_mask, cpu_online_map)) {
 		ret = -EINVAL;
 		goto out;
 	}
 
 	if (p->sched_class->set_cpus_allowed)
-		p->sched_class->set_cpus_allowed(p, &new_mask);
+		p->sched_class->set_cpus_allowed(p, new_mask);
 	else {
-		p->cpus_allowed = new_mask;
-		p->rt.nr_cpus_allowed = cpus_weight(new_mask);
+		p->cpus_allowed = *new_mask;
+		p->rt.nr_cpus_allowed = cpus_weight(*new_mask);
 	}
 
 	/* Can the task run on the task's current CPU? If so, we're done */
-	if (cpu_isset(task_cpu(p), new_mask))
+	if (cpu_isset(task_cpu(p), *new_mask))
 		goto out;
 
-	if (migrate_task(p, any_online_cpu(new_mask), &req)) {
+	if (migrate_task(p, any_online_cpu(*new_mask), &req)) {
 		/* Need help from migration thread: drop lock and wait. */
 		task_rq_unlock(rq, &flags);
 		wake_up_process(rq->migration_thread);
@@ -5523,7 +5523,7 @@ out:
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(set_cpus_allowed);
+EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
 
 /*
  * Move (not current) task off this cpu, onto dest cpu. We're doing
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 6928ded24da1..8ff824565e06 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1123,7 +1123,8 @@ move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
 	return 0;
 }
 
-static void set_cpus_allowed_rt(struct task_struct *p, cpumask_t *new_mask)
+static void set_cpus_allowed_rt(struct task_struct *p,
+				const cpumask_t *new_mask)
 {
 	int weight = cpus_weight(*new_mask);
 
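
Note: the hunks above change set_cpus_allowed() to take its cpumask by const pointer instead of by value, renaming it set_cpus_allowed_ptr(); with large NR_CPUS a cpumask_t spans several words, so passing a pointer avoids copying the whole mask at every call. A minimal caller-side sketch of the conversion, assuming a 2.6.25-era helper like cpumask_of_cpu() and a hypothetical wrapper name pin_task_to_cpu() (not part of this patch):

/* Hypothetical caller illustrating the calling-convention change. */
static int pin_task_to_cpu(struct task_struct *p, int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);	/* single-CPU mask on the stack */

	/*
	 * Before this patch the mask was copied by value:
	 *	ret = set_cpus_allowed(p, mask);
	 * After it, callers pass a const pointer instead:
	 */
	return set_cpus_allowed_ptr(p, &mask);
}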