Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--	kernel/sched/core.c | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
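Context for the hunks below: every change is the same mechanical substitution, dropping the tsk_cpus_allowed() accessor in favour of taking the address of the task's cpus_allowed mask directly. A minimal sketch of the two spellings (the macro definition shown is an assumption about the pre-patch <linux/sched.h>, included here only for illustration):

	/* Pre-patch accessor (assumed historical definition, illustration only): */
	#define tsk_cpus_allowed(tsk)	(&(tsk)->cpus_allowed)

	/* Post-patch, callers open-code the same expression: */
	if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
		return rq;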
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 2acdf19c5f7c..ef5bbf760a08 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -981,7 +981,7 @@ static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int dest_
 		return rq;

 	/* Affinity changed (again). */
-	if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
+	if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
 		return rq;

 	rq = move_queued_task(rq, p, dest_cpu);
@@ -1259,10 +1259,10 @@ static int migrate_swap_stop(void *data)
 	if (task_cpu(arg->src_task) != arg->src_cpu)
 		goto unlock;

-	if (!cpumask_test_cpu(arg->dst_cpu, tsk_cpus_allowed(arg->src_task)))
+	if (!cpumask_test_cpu(arg->dst_cpu, &arg->src_task->cpus_allowed))
 		goto unlock;

-	if (!cpumask_test_cpu(arg->src_cpu, tsk_cpus_allowed(arg->dst_task)))
+	if (!cpumask_test_cpu(arg->src_cpu, &arg->dst_task->cpus_allowed))
 		goto unlock;

 	__migrate_swap_task(arg->src_task, arg->dst_cpu);
@@ -1303,10 +1303,10 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p)
 	if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
 		goto out;

-	if (!cpumask_test_cpu(arg.dst_cpu, tsk_cpus_allowed(arg.src_task)))
+	if (!cpumask_test_cpu(arg.dst_cpu, &arg.src_task->cpus_allowed))
 		goto out;

-	if (!cpumask_test_cpu(arg.src_cpu, tsk_cpus_allowed(arg.dst_task)))
+	if (!cpumask_test_cpu(arg.src_cpu, &arg.dst_task->cpus_allowed))
 		goto out;

 	trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
@@ -1490,14 +1490,14 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
 		for_each_cpu(dest_cpu, nodemask) {
 			if (!cpu_active(dest_cpu))
 				continue;
-			if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
+			if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
 				return dest_cpu;
 		}
 	}

 	for (;;) {
 		/* Any allowed, online CPU? */
-		for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
+		for_each_cpu(dest_cpu, &p->cpus_allowed) {
 			if (!(p->flags & PF_KTHREAD) && !cpu_active(dest_cpu))
 				continue;
 			if (!cpu_online(dest_cpu))
@@ -1552,7 +1552,7 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
 	if (tsk_nr_cpus_allowed(p) > 1)
 		cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
 	else
-		cpu = cpumask_any(tsk_cpus_allowed(p));
+		cpu = cpumask_any(&p->cpus_allowed);

 	/*
 	 * In order not to call set_task_cpu() on a blocking task we need
@@ -1564,7 +1564,7 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
 	 * [ this allows ->select_task() to simply return task_cpu(p) and
 	 *   not worry about this generic constraint ]
 	 */
-	if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) ||
+	if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
 		     !cpu_online(cpu)))
 		cpu = select_fallback_rq(task_cpu(p), p);

@@ -5473,7 +5473,7 @@ int migrate_task_to(struct task_struct *p, int target_cpu)
 	if (curr_cpu == target_cpu)
 		return 0;

-	if (!cpumask_test_cpu(target_cpu, tsk_cpus_allowed(p)))
+	if (!cpumask_test_cpu(target_cpu, &p->cpus_allowed))
 		return -EINVAL;

 	/* TODO: This is not properly updating schedstats */
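Taken together, the post-patch idiom at each of these call sites reduces to an affinity-mask test against p->cpus_allowed. A small self-contained sketch of that check (the helper name below is illustrative and not part of the tree):

	#include <linux/cpumask.h>
	#include <linux/sched.h>

	/*
	 * Illustrative helper, not in the kernel tree: returns true if @cpu is
	 * in @p's affinity mask, i.e. the check each hunk above now open-codes.
	 */
	static inline bool task_allowed_on_cpu(struct task_struct *p, int cpu)
	{
		return cpumask_test_cpu(cpu, &p->cpus_allowed);
	}

Note that the separate tsk_nr_cpus_allowed() helper visible at line 1552 appears unchanged on both sides of this diff; only the cpumask accessor is open-coded here.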