path: root/kernel
author    Sebastian Andrzej Siewior <bigeasy@linutronix.de>    2019-04-23 10:26:36 -0400
committer Ingo Molnar <mingo@kernel.org>    2019-06-03 05:49:37 -0400
commit    3bd3706251ee8ab67e69d9340ac2abdca217e733 (patch)
tree      4431aa630d095905d840ace6a0b86e266395f71a /kernel
parent    f2c7c76c5d0a443053e94adb9f0918fa2fb85c3a (diff)
sched/core: Provide a pointer to the valid CPU mask
In commit:

  4b53a3412d66 ("sched/core: Remove the tsk_nr_cpus_allowed() wrapper")

the tsk_nr_cpus_allowed() wrapper was removed. There was not much difference
in !RT, but in RT we used it to implement migrate_disable(). Within a
migrate_disable() section the CPU mask is restricted to a single CPU while the
"normal" CPU mask remains untouched.

As an alternative implementation, Ingo suggested using:

	struct task_struct {
		const cpumask_t	*cpus_ptr;
		cpumask_t	cpus_mask;
	};

with

	t->cpus_ptr = &t->cpus_mask;

In -RT we can then switch cpus_ptr to:

	t->cpus_ptr = &cpumask_of(task_cpu(p));

in a migration-disabled region. The rules are simple:

 - Code that 'uses' ->cpus_allowed would use the pointer.
 - Code that 'modifies' ->cpus_allowed would use the direct mask.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20190423142636.14347-1-bigeasy@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
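The split can be illustrated with a small userspace model (a sketch only: the
cpumask_t, task_struct, set_cpus_allowed(), migrate_disable() and
migrate_enable() below are simplified stand-ins, not the kernel definitions).
Readers dereference cpus_ptr, writers update cpus_mask, and an -RT-style
migrate_disable() merely re-points cpus_ptr at a single-CPU mask:

	/* Simplified model of the cpus_ptr / cpus_mask split (not kernel code). */
	#include <stdio.h>

	typedef unsigned long cpumask_t;	/* stand-in: one bit per CPU */

	struct task_struct {			/* reduced to the two fields at hand */
		const cpumask_t	*cpus_ptr;	/* code that 'uses' the mask reads this */
		cpumask_t	cpus_mask;	/* code that 'modifies' the mask writes this */
	};

	static cpumask_t cpu_singleton[8];	/* stand-in for cpumask_of(cpu) */

	/* Writer side: what set_cpus_allowed_common() does to ->cpus_mask. */
	static void set_cpus_allowed(struct task_struct *t, cpumask_t new_mask)
	{
		t->cpus_mask = new_mask;
	}

	/* -RT-style migrate_disable(): restrict the mask seen by readers to one
	 * CPU by swapping the pointer; cpus_mask itself stays untouched. */
	static void migrate_disable(struct task_struct *t, int cpu)
	{
		cpu_singleton[cpu] = 1UL << cpu;
		t->cpus_ptr = &cpu_singleton[cpu];
	}

	static void migrate_enable(struct task_struct *t)
	{
		t->cpus_ptr = &t->cpus_mask;	/* back to the full mask */
	}

	int main(void)
	{
		struct task_struct t = { .cpus_mask = 0xf };	/* CPUs 0-3 allowed */

		t.cpus_ptr = &t.cpus_mask;			/* default wiring */
		migrate_disable(&t, 2);
		printf("visible %#lx, real %#lx\n", *t.cpus_ptr, t.cpus_mask);
		migrate_enable(&t);
		set_cpus_allowed(&t, 0x3);
		printf("visible %#lx\n", *t.cpus_ptr);		/* readers follow the writer */
		return 0;
	}

The same default wiring explains the dup_task_struct() hunk below: the child is
created by a plain struct copy, so when the parent's pointer uses the default
wiring, the child's cpus_ptr has to be re-aimed at its own cpus_mask rather
than the parent's.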
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cgroup/cpuset.c       2
-rw-r--r--  kernel/fork.c                2
-rw-r--r--  kernel/sched/core.c         40
-rw-r--r--  kernel/sched/cpudeadline.c   4
-rw-r--r--  kernel/sched/cpupri.c        4
-rw-r--r--  kernel/sched/deadline.c      6
-rw-r--r--  kernel/sched/fair.c         34
-rw-r--r--  kernel/sched/rt.c            4
-rw-r--r--  kernel/trace/trace_hwlat.c   2
9 files changed, 50 insertions(+), 48 deletions(-)
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 6a1942ed781c..fe90fa1899e6 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -2829,7 +2829,7 @@ static void cpuset_fork(struct task_struct *task)
 	if (task_css_is_root(task, cpuset_cgrp_id))
 		return;
 
-	set_cpus_allowed_ptr(task, &current->cpus_allowed);
+	set_cpus_allowed_ptr(task, current->cpus_ptr);
 	task->mems_allowed = current->mems_allowed;
 }
 
diff --git a/kernel/fork.c b/kernel/fork.c
index 75675b9bf6df..6be686283e55 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -894,6 +894,8 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 #ifdef CONFIG_STACKPROTECTOR
 	tsk->stack_canary = get_random_canary();
 #endif
+	if (orig->cpus_ptr == &orig->cpus_mask)
+		tsk->cpus_ptr = &tsk->cpus_mask;
 
 	/*
 	 * One for us, one for whoever does the "release_task()" (usually
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 874c427742a9..93ab85f0d076 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -930,7 +930,7 @@ static inline bool is_per_cpu_kthread(struct task_struct *p)
  */
 static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
 {
-	if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
+	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
 		return false;
 
 	if (is_per_cpu_kthread(p))
@@ -1025,7 +1025,7 @@ static int migration_cpu_stop(void *data)
 	local_irq_disable();
 	/*
 	 * We need to explicitly wake pending tasks before running
-	 * __migrate_task() such that we will not miss enforcing cpus_allowed
+	 * __migrate_task() such that we will not miss enforcing cpus_ptr
 	 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
 	 */
 	sched_ttwu_pending();
@@ -1056,7 +1056,7 @@ static int migration_cpu_stop(void *data)
  */
 void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
 {
-	cpumask_copy(&p->cpus_allowed, new_mask);
+	cpumask_copy(&p->cpus_mask, new_mask);
 	p->nr_cpus_allowed = cpumask_weight(new_mask);
 }
 
@@ -1126,7 +1126,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
 		goto out;
 	}
 
-	if (cpumask_equal(&p->cpus_allowed, new_mask))
+	if (cpumask_equal(p->cpus_ptr, new_mask))
 		goto out;
 
 	if (!cpumask_intersects(new_mask, cpu_valid_mask)) {
@@ -1286,10 +1286,10 @@ static int migrate_swap_stop(void *data)
 	if (task_cpu(arg->src_task) != arg->src_cpu)
 		goto unlock;
 
-	if (!cpumask_test_cpu(arg->dst_cpu, &arg->src_task->cpus_allowed))
+	if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr))
 		goto unlock;
 
-	if (!cpumask_test_cpu(arg->src_cpu, &arg->dst_task->cpus_allowed))
+	if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr))
 		goto unlock;
 
 	__migrate_swap_task(arg->src_task, arg->dst_cpu);
@@ -1331,10 +1331,10 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p,
 	if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
 		goto out;
 
-	if (!cpumask_test_cpu(arg.dst_cpu, &arg.src_task->cpus_allowed))
+	if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr))
 		goto out;
 
-	if (!cpumask_test_cpu(arg.src_cpu, &arg.dst_task->cpus_allowed))
+	if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr))
 		goto out;
 
 	trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
@@ -1479,7 +1479,7 @@ void kick_process(struct task_struct *p)
 EXPORT_SYMBOL_GPL(kick_process);
 
 /*
- * ->cpus_allowed is protected by both rq->lock and p->pi_lock
+ * ->cpus_ptr is protected by both rq->lock and p->pi_lock
  *
  * A few notes on cpu_active vs cpu_online:
  *
@@ -1519,14 +1519,14 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
 		for_each_cpu(dest_cpu, nodemask) {
 			if (!cpu_active(dest_cpu))
 				continue;
-			if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
+			if (cpumask_test_cpu(dest_cpu, p->cpus_ptr))
 				return dest_cpu;
 		}
 	}
 
 	for (;;) {
 		/* Any allowed, online CPU? */
-		for_each_cpu(dest_cpu, &p->cpus_allowed) {
+		for_each_cpu(dest_cpu, p->cpus_ptr) {
 			if (!is_cpu_allowed(p, dest_cpu))
 				continue;
 
@@ -1570,7 +1570,7 @@ out:
 }
 
 /*
- * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
+ * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable.
  */
 static inline
 int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
@@ -1580,11 +1580,11 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
 	if (p->nr_cpus_allowed > 1)
 		cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
 	else
-		cpu = cpumask_any(&p->cpus_allowed);
+		cpu = cpumask_any(p->cpus_ptr);
 
 	/*
 	 * In order not to call set_task_cpu() on a blocking task we need
-	 * to rely on ttwu() to place the task on a valid ->cpus_allowed
+	 * to rely on ttwu() to place the task on a valid ->cpus_ptr
 	 * CPU.
 	 *
 	 * Since this is common to all placement strategies, this lives here.
@@ -2395,7 +2395,7 @@ void wake_up_new_task(struct task_struct *p)
 #ifdef CONFIG_SMP
 	/*
 	 * Fork balancing, do it here and not earlier because:
-	 *  - cpus_allowed can change in the fork path
+	 *  - cpus_ptr can change in the fork path
 	 *  - any previously selected CPU might disappear through hotplug
 	 *
 	 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
@@ -4267,7 +4267,7 @@ change:
 			 * the entire root_domain to become SCHED_DEADLINE. We
 			 * will also fail if there's no bandwidth available.
 			 */
-			if (!cpumask_subset(span, &p->cpus_allowed) ||
+			if (!cpumask_subset(span, p->cpus_ptr) ||
 			    rq->rd->dl_bw.bw == 0) {
 				task_rq_unlock(rq, p, &rf);
 				return -EPERM;
@@ -4866,7 +4866,7 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
 		goto out_unlock;
 
 	raw_spin_lock_irqsave(&p->pi_lock, flags);
-	cpumask_and(mask, &p->cpus_allowed, cpu_active_mask);
+	cpumask_and(mask, &p->cpus_mask, cpu_active_mask);
 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 
 out_unlock:
@@ -5443,7 +5443,7 @@ int task_can_attach(struct task_struct *p,
 	 * allowed nodes is unnecessary. Thus, cpusets are not
 	 * applicable for such threads. This prevents checking for
 	 * success of set_cpus_allowed_ptr() on all attached tasks
-	 * before cpus_allowed may be changed.
+	 * before cpus_mask may be changed.
 	 */
 	if (p->flags & PF_NO_SETAFFINITY) {
 		ret = -EINVAL;
@@ -5470,7 +5470,7 @@ int migrate_task_to(struct task_struct *p, int target_cpu)
 	if (curr_cpu == target_cpu)
 		return 0;
 
-	if (!cpumask_test_cpu(target_cpu, &p->cpus_allowed))
+	if (!cpumask_test_cpu(target_cpu, p->cpus_ptr))
 		return -EINVAL;
 
 	/* TODO: This is not properly updating schedstats */
@@ -5608,7 +5608,7 @@ static void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf)
 		put_prev_task(rq, next);
 
 		/*
-		 * Rules for changing task_struct::cpus_allowed are holding
+		 * Rules for changing task_struct::cpus_mask are holding
 		 * both pi_lock and rq->lock, such that holding either
 		 * stabilizes the mask.
 		 *
diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
index 50316455ea66..d57fb2f8ae67 100644
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -124,14 +124,14 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
 	const struct sched_dl_entity *dl_se = &p->dl;
 
 	if (later_mask &&
-	    cpumask_and(later_mask, cp->free_cpus, &p->cpus_allowed)) {
+	    cpumask_and(later_mask, cp->free_cpus, p->cpus_ptr)) {
 		return 1;
 	} else {
 		int best_cpu = cpudl_maximum(cp);
 
 		WARN_ON(best_cpu != -1 && !cpu_present(best_cpu));
 
-		if (cpumask_test_cpu(best_cpu, &p->cpus_allowed) &&
+		if (cpumask_test_cpu(best_cpu, p->cpus_ptr) &&
 		    dl_time_before(dl_se->deadline, cp->elements[0].dl)) {
 			if (later_mask)
 				cpumask_set_cpu(best_cpu, later_mask);
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index daaadf939ccb..f7d2c10b4c92 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -98,11 +98,11 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
 		if (skip)
 			continue;
 
-		if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
+		if (cpumask_any_and(p->cpus_ptr, vec->mask) >= nr_cpu_ids)
 			continue;
 
 		if (lowest_mask) {
-			cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);
+			cpumask_and(lowest_mask, p->cpus_ptr, vec->mask);
 
 			/*
 			 * We have to ensure that we have at least one bit
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 43901fa3f269..c1ef30861068 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -538,7 +538,7 @@ static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p
 	 * If we cannot preempt any rq, fall back to pick any
 	 * online CPU:
 	 */
-	cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
+	cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr);
 	if (cpu >= nr_cpu_ids) {
 		/*
 		 * Failed to find any suitable CPU.
@@ -1824,7 +1824,7 @@ static void set_curr_task_dl(struct rq *rq)
 static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
 {
 	if (!task_running(rq, p) &&
-	    cpumask_test_cpu(cpu, &p->cpus_allowed))
+	    cpumask_test_cpu(cpu, p->cpus_ptr))
 		return 1;
 	return 0;
 }
@@ -1974,7 +1974,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
 		/* Retry if something changed. */
 		if (double_lock_balance(rq, later_rq)) {
 			if (unlikely(task_rq(task) != rq ||
-				     !cpumask_test_cpu(later_rq->cpu, &task->cpus_allowed) ||
+				     !cpumask_test_cpu(later_rq->cpu, task->cpus_ptr) ||
 				     task_running(rq, task) ||
 				     !dl_task(task) ||
 				     !task_on_rq_queued(task))) {
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f35930f5e528..8691a8fffe40 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1621,7 +1621,7 @@ static void task_numa_compare(struct task_numa_env *env,
 	 * be incurred if the tasks were swapped.
 	 */
 	/* Skip this swap candidate if cannot move to the source cpu */
-	if (!cpumask_test_cpu(env->src_cpu, &cur->cpus_allowed))
+	if (!cpumask_test_cpu(env->src_cpu, cur->cpus_ptr))
 		goto unlock;
 
 	/*
@@ -1718,7 +1718,7 @@ static void task_numa_find_cpu(struct task_numa_env *env,
 
 	for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
 		/* Skip this CPU if the source task cannot migrate */
-		if (!cpumask_test_cpu(cpu, &env->p->cpus_allowed))
+		if (!cpumask_test_cpu(cpu, env->p->cpus_ptr))
 			continue;
 
 		env->dst_cpu = cpu;
@@ -5831,7 +5831,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 
 		/* Skip over this group if it has no CPUs allowed */
 		if (!cpumask_intersects(sched_group_span(group),
-					&p->cpus_allowed))
+					p->cpus_ptr))
 			continue;
 
 		local_group = cpumask_test_cpu(this_cpu,
@@ -5963,7 +5963,7 @@ find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this
 		return cpumask_first(sched_group_span(group));
 
 	/* Traverse only the allowed CPUs */
-	for_each_cpu_and(i, sched_group_span(group), &p->cpus_allowed) {
+	for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr) {
 		if (available_idle_cpu(i)) {
 			struct rq *rq = cpu_rq(i);
 			struct cpuidle_state *idle = idle_get_state(rq);
@@ -6003,7 +6003,7 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p
 {
 	int new_cpu = cpu;
 
-	if (!cpumask_intersects(sched_domain_span(sd), &p->cpus_allowed))
+	if (!cpumask_intersects(sched_domain_span(sd), p->cpus_ptr))
 		return prev_cpu;
 
 	/*
@@ -6120,7 +6120,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int
 	if (!test_idle_cores(target, false))
 		return -1;
 
-	cpumask_and(cpus, sched_domain_span(sd), &p->cpus_allowed);
+	cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
 
 	for_each_cpu_wrap(core, cpus, target) {
 		bool idle = true;
@@ -6154,7 +6154,7 @@ static int select_idle_smt(struct task_struct *p, int target)
 		return -1;
 
 	for_each_cpu(cpu, cpu_smt_mask(target)) {
-		if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
+		if (!cpumask_test_cpu(cpu, p->cpus_ptr))
 			continue;
 		if (available_idle_cpu(cpu))
 			return cpu;
@@ -6217,7 +6217,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
 	for_each_cpu_wrap(cpu, sched_domain_span(sd), target) {
 		if (!--nr)
 			return -1;
-		if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
+		if (!cpumask_test_cpu(cpu, p->cpus_ptr))
 			continue;
 		if (available_idle_cpu(cpu))
 			break;
@@ -6254,7 +6254,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 	    recent_used_cpu != target &&
 	    cpus_share_cache(recent_used_cpu, target) &&
 	    available_idle_cpu(recent_used_cpu) &&
-	    cpumask_test_cpu(p->recent_used_cpu, &p->cpus_allowed)) {
+	    cpumask_test_cpu(p->recent_used_cpu, p->cpus_ptr)) {
 		/*
 		 * Replace recent_used_cpu with prev as it is a potential
 		 * candidate for the next wake:
@@ -6600,7 +6600,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
 		int max_spare_cap_cpu = -1;
 
 		for_each_cpu_and(cpu, perf_domain_span(pd), sched_domain_span(sd)) {
-			if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
+			if (!cpumask_test_cpu(cpu, p->cpus_ptr))
 				continue;
 
 			/* Skip CPUs that will be overutilized. */
@@ -6689,7 +6689,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
 		}
 
 		want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu) &&
-			      cpumask_test_cpu(cpu, &p->cpus_allowed);
+			      cpumask_test_cpu(cpu, p->cpus_ptr);
 	}
 
 	rcu_read_lock();
@@ -7445,14 +7445,14 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
 	/*
 	 * We do not migrate tasks that are:
 	 * 1) throttled_lb_pair, or
-	 * 2) cannot be migrated to this CPU due to cpus_allowed, or
+	 * 2) cannot be migrated to this CPU due to cpus_ptr, or
 	 * 3) running (obviously), or
 	 * 4) are cache-hot on their current CPU.
 	 */
 	if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
 		return 0;
 
-	if (!cpumask_test_cpu(env->dst_cpu, &p->cpus_allowed)) {
+	if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) {
 		int cpu;
 
 		schedstat_inc(p->se.statistics.nr_failed_migrations_affine);
@@ -7472,7 +7472,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
 
 		/* Prevent to re-select dst_cpu via env's CPUs: */
 		for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
-			if (cpumask_test_cpu(cpu, &p->cpus_allowed)) {
+			if (cpumask_test_cpu(cpu, p->cpus_ptr)) {
 				env->flags |= LBF_DST_PINNED;
 				env->new_dst_cpu = cpu;
 				break;
@@ -8099,7 +8099,7 @@ static inline int check_misfit_status(struct rq *rq, struct sched_domain *sd)
 
 /*
  * Group imbalance indicates (and tries to solve) the problem where balancing
- * groups is inadequate due to ->cpus_allowed constraints.
+ * groups is inadequate due to ->cpus_ptr constraints.
  *
  * Imagine a situation of two groups of 4 CPUs each and 4 tasks each with a
  * cpumask covering 1 CPU of the first group and 3 CPUs of the second group.
@@ -8768,7 +8768,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
 	/*
 	 * If the busiest group is imbalanced the below checks don't
 	 * work because they assume all things are equal, which typically
-	 * isn't true due to cpus_allowed constraints and the like.
+	 * isn't true due to cpus_ptr constraints and the like.
 	 */
 	if (busiest->group_type == group_imbalanced)
 		goto force_balance;
@@ -9210,7 +9210,7 @@ more_balance:
 				 * if the curr task on busiest CPU can't be
 				 * moved to this_cpu:
 				 */
-				if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) {
+				if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) {
 					raw_spin_unlock_irqrestore(&busiest->lock,
 								    flags);
 					env.flags |= LBF_ALL_PINNED;
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 1e6b909dca36..63ad7c90822c 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1614,7 +1614,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
 {
 	if (!task_running(rq, p) &&
-	    cpumask_test_cpu(cpu, &p->cpus_allowed))
+	    cpumask_test_cpu(cpu, p->cpus_ptr))
 		return 1;
 
 	return 0;
@@ -1751,7 +1751,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 		 * Also make sure that it wasn't scheduled on its rq.
 		 */
 		if (unlikely(task_rq(task) != rq ||
-			     !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_allowed) ||
+			     !cpumask_test_cpu(lowest_rq->cpu, task->cpus_ptr) ||
 			     task_running(rq, task) ||
 			     !rt_task(task) ||
 			     !task_on_rq_queued(task))) {
diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
index 1e6db9cbe4dc..fa95139445b2 100644
--- a/kernel/trace/trace_hwlat.c
+++ b/kernel/trace/trace_hwlat.c
@@ -277,7 +277,7 @@ static void move_to_next_cpu(void)
 	 * of this thread, than stop migrating for the duration
 	 * of the current test.
 	 */
-	if (!cpumask_equal(current_mask, &current->cpus_allowed))
+	if (!cpumask_equal(current_mask, current->cpus_ptr))
 		goto disable;
 
 	get_online_cpus();