 include/linux/sched.h | 15 +++++++++++----
 kernel/sched.c        | 16 ++++++++--------
 kernel/sched_rt.c     |  3 ++-
 3 files changed, 21 insertions(+), 13 deletions(-)
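
This patch replaces the by-value set_cpus_allowed(struct task_struct *, cpumask_t) with set_cpus_allowed_ptr(), which takes a const cpumask_t *, and keeps a by-value inline wrapper under the old name so existing callers still build. A minimal caller-side sketch of the two forms (the helper name and the choice of CPU 1 are illustrative, not part of this patch):

#include <linux/cpumask.h>
#include <linux/sched.h>

/* Hypothetical helper: pin a task to CPU 1 using either form. */
static int pin_to_cpu1(struct task_struct *p)
{
	cpumask_t mask = cpumask_of_cpu(1);

	/* Old form, still available via the compatibility wrapper:
	 * the entire NR_CPUS-bit mask is copied into the argument.
	 *
	 *	return set_cpus_allowed(p, mask);
	 */

	/* New form: only a pointer crosses the call boundary. */
	return set_cpus_allowed_ptr(p, &mask);
}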
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 383502dfda17..79c025c3b627 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -889,7 +889,8 @@ struct sched_class {
 	void (*set_curr_task) (struct rq *rq);
 	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
 	void (*task_new) (struct rq *rq, struct task_struct *p);
-	void (*set_cpus_allowed)(struct task_struct *p, cpumask_t *newmask);
+	void (*set_cpus_allowed)(struct task_struct *p,
+				 const cpumask_t *newmask);
 
 	void (*join_domain)(struct rq *rq);
 	void (*leave_domain)(struct rq *rq);
@@ -1502,15 +1503,21 @@ static inline void put_task_struct(struct task_struct *t)
 #define used_math() tsk_used_math(current)
 
 #ifdef CONFIG_SMP
-extern int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask);
+extern int set_cpus_allowed_ptr(struct task_struct *p,
+				const cpumask_t *new_mask);
 #else
-static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
+static inline int set_cpus_allowed_ptr(struct task_struct *p,
+				       const cpumask_t *new_mask)
 {
-	if (!cpu_isset(0, new_mask))
+	if (!cpu_isset(0, *new_mask))
 		return -EINVAL;
 	return 0;
 }
 #endif
+static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
+{
+	return set_cpus_allowed_ptr(p, &new_mask);
+}
 
 extern unsigned long long sched_clock(void);
 
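
The inline set_cpus_allowed() wrapper added at the end of the hunk above simply takes the address of its on-stack argument, so old call sites keep compiling while new code avoids the copy. The saving matters on large configurations; a rough illustration follows (NR_CPUS == 4096 is an assumed config value, and the helper is hypothetical, not something this patch adds):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/cpumask.h>

/* Hypothetical debug helper comparing argument-passing cost. */
static void __init cpumask_cost(void)
{
	/* With NR_CPUS == 4096 on a 64-bit kernel, sizeof(cpumask_t)
	 * is 4096 / 8 = 512 bytes copied per by-value call, versus
	 * sizeof(cpumask_t *) == 8 bytes for the _ptr variant. */
	printk(KERN_DEBUG "by value: %zu bytes, by pointer: %zu bytes\n",
	       sizeof(cpumask_t), sizeof(cpumask_t *));
}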
diff --git a/kernel/sched.c b/kernel/sched.c
index 6ab0fcbf26e9..521b89b01480 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5486,7 +5486,7 @@ static inline void sched_init_granularity(void)
  * task must not exit() & deallocate itself prematurely. The
  * call is not atomic; no spinlocks may be held.
  */
-int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
+int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask)
 {
 	struct migration_req req;
 	unsigned long flags;
@@ -5494,23 +5494,23 @@ int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 	int ret = 0;
 
 	rq = task_rq_lock(p, &flags);
-	if (!cpus_intersects(new_mask, cpu_online_map)) {
+	if (!cpus_intersects(*new_mask, cpu_online_map)) {
 		ret = -EINVAL;
 		goto out;
 	}
 
 	if (p->sched_class->set_cpus_allowed)
-		p->sched_class->set_cpus_allowed(p, &new_mask);
+		p->sched_class->set_cpus_allowed(p, new_mask);
 	else {
-		p->cpus_allowed = new_mask;
-		p->rt.nr_cpus_allowed = cpus_weight(new_mask);
+		p->cpus_allowed = *new_mask;
+		p->rt.nr_cpus_allowed = cpus_weight(*new_mask);
 	}
 
 	/* Can the task run on the task's current CPU? If so, we're done */
-	if (cpu_isset(task_cpu(p), new_mask))
+	if (cpu_isset(task_cpu(p), *new_mask))
 		goto out;
 
-	if (migrate_task(p, any_online_cpu(new_mask), &req)) {
+	if (migrate_task(p, any_online_cpu(*new_mask), &req)) {
 		/* Need help from migration thread: drop lock and wait. */
 		task_rq_unlock(rq, &flags);
 		wake_up_process(rq->migration_thread);
@@ -5523,7 +5523,7 @@ out:
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(set_cpus_allowed);
+EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
 
 /*
  * Move (not current) task off this cpu, onto dest cpu. We're doing
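
In set_cpus_allowed_ptr() above, the sched_class hook now receives the caller's pointer directly (no more &new_mask of a by-value copy), and the fallback path dereferences it. A sketch of a class hook under the new signature, mirroring that fallback (the _dummy name is illustrative; the real in-tree implementation is set_cpus_allowed_rt below):

static void set_cpus_allowed_dummy(struct task_struct *p,
				   const cpumask_t *new_mask)
{
	/* Same bookkeeping as the !set_cpus_allowed fallback above. */
	p->cpus_allowed = *new_mask;
	p->rt.nr_cpus_allowed = cpus_weight(*new_mask);
}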
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 6928ded24da1..8ff824565e06 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1123,7 +1123,8 @@ move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
 	return 0;
 }
 
-static void set_cpus_allowed_rt(struct task_struct *p, cpumask_t *new_mask)
+static void set_cpus_allowed_rt(struct task_struct *p,
+				const cpumask_t *new_mask)
 {
 	int weight = cpus_weight(*new_mask);
 