author    Peter Zijlstra <peterz@infradead.org>    2011-06-16 06:23:22 -0400
committer Ingo Molnar <mingo@elte.hu>              2011-10-06 06:46:56 -0400
commit    fa17b507f142d37aeac322a95f6f7c6375f25601
tree      5ff8b9d7087175aca85dff3393932fe9b5497425 /kernel
parent    6eb57e0d65ebd99a71d435dc96d83e725752eef8
sched: Wrap scheduler p->cpus_allowed access
This task is preparatory for the migrate_disable() implementation, but stands on its own and provides a cleanup.

It currently only converts those sites required for task placement. Kosaki-san once mentioned replacing cpus_allowed with a proper cpumask_t instead of the NR_CPUS-sized array it currently is; that would also require something like this.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Link: http://lkml.kernel.org/n/tip-e42skvaddos99psip0vce41o@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
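[ For reference: the accessor used at all of the converted call sites is the existing wrapper from include/linux/sched.h, introduced by an earlier commit and not part of this diff; a minimal sketch of it:

/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)

With every reader funneled through this one macro, a later change to how the allowed mask is represented or selected (migrate_disable(), or the cpumask_t conversion mentioned above) only has to touch this definition rather than every call site. ]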
Diffstat (limited to 'kernel')
-rw-r--r--    kernel/sched.c          8
-rw-r--r--    kernel/sched_fair.c    12
-rw-r--r--    kernel/sched_rt.c       4
3 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 7bc9b0e84eb3..45174ca5c8ea 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2544,11 +2544,11 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
 
 	/* Look for allowed, online CPU in same node. */
 	for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
-		if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
+		if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
 			return dest_cpu;
 
 	/* Any allowed, online CPU? */
-	dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
+	dest_cpu = cpumask_any_and(tsk_cpus_allowed(p), cpu_active_mask);
 	if (dest_cpu < nr_cpu_ids)
 		return dest_cpu;
 
@@ -2585,7 +2585,7 @@ int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
 	 * [ this allows ->select_task() to simply return task_cpu(p) and
 	 *   not worry about this generic constraint ]
 	 */
-	if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
+	if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) ||
 		     !cpu_online(cpu)))
 		cpu = select_fallback_rq(task_cpu(p), p);
 
@@ -6262,7 +6262,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 	if (task_cpu(p) != src_cpu)
 		goto done;
 	/* Affinity changed (again). */
-	if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
+	if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
 		goto fail;
 
 	/*
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 506db0966eb8..5c9e67923b7c 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -2183,7 +2183,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 
 		/* Skip over this group if it has no CPUs allowed */
 		if (!cpumask_intersects(sched_group_cpus(group),
-					&p->cpus_allowed))
+					tsk_cpus_allowed(p)))
 			continue;
 
 		local_group = cpumask_test_cpu(this_cpu,
@@ -2229,7 +2229,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 	int i;
 
 	/* Traverse only the allowed CPUs */
-	for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
+	for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
 		load = weighted_cpuload(i);
 
 		if (load < min_load || (load == min_load && i == this_cpu)) {
@@ -2273,7 +2273,7 @@ static int select_idle_sibling(struct task_struct *p, int target)
 		if (!(sd->flags & SD_SHARE_PKG_RESOURCES))
 			break;
 
-		for_each_cpu_and(i, sched_domain_span(sd), &p->cpus_allowed) {
+		for_each_cpu_and(i, sched_domain_span(sd), tsk_cpus_allowed(p)) {
 			if (idle_cpu(i)) {
 				target = i;
 				break;
@@ -2316,7 +2316,7 @@ select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
 	int sync = wake_flags & WF_SYNC;
 
 	if (sd_flag & SD_BALANCE_WAKE) {
-		if (cpumask_test_cpu(cpu, &p->cpus_allowed))
+		if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
 			want_affine = 1;
 		new_cpu = prev_cpu;
 	}
@@ -2697,7 +2697,7 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
 	 * 2) cannot be migrated to this CPU due to cpus_allowed, or
 	 * 3) are cache-hot on their current CPU.
 	 */
-	if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed)) {
+	if (!cpumask_test_cpu(this_cpu, tsk_cpus_allowed(p))) {
 		schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
 		return 0;
 	}
@@ -4087,7 +4087,7 @@ redo:
 			 * moved to this_cpu
 			 */
 			if (!cpumask_test_cpu(this_cpu,
-					      &busiest->curr->cpus_allowed)) {
+					      tsk_cpus_allowed(busiest->curr))) {
 				raw_spin_unlock_irqrestore(&busiest->lock,
 							    flags);
 				all_pinned = 1;
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 0cc188cf7664..57a10842afa1 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1179,7 +1179,7 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
 {
 	if (!task_running(rq, p) &&
-	    (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) &&
+	    (cpu < 0 || cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) &&
 	    (p->rt.nr_cpus_allowed > 1))
 		return 1;
 	return 0;
@@ -1324,7 +1324,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 			 */
 			if (unlikely(task_rq(task) != rq ||
 				     !cpumask_test_cpu(lowest_rq->cpu,
-						       &task->cpus_allowed) ||
+						       tsk_cpus_allowed(task)) ||
 				     task_running(rq, task) ||
 				     !task->on_rq)) {
 