author     Ingo Molnar <mingo@kernel.org>    2017-02-05 09:38:10 -0500
committer  Ingo Molnar <mingo@kernel.org>    2017-03-02 02:42:24 -0500
commit     0c98d344fe5c27f6e4bce42ac503e9e9a51c7d1d (patch)
tree       98a45a561f3cfe6a1be9942cfdf23af5f70ca7dd
parent     59ddbcb2f45b958cf1f11f122b666cbcf50cd57b (diff)
sched/core: Remove the tsk_cpus_allowed() wrapper
So the original intention of tsk_cpus_allowed() was to 'future-proof'
the field - but it's pretty ineffectual at that, because half of
the code uses ->cpus_allowed directly ...

Also, the wrapper makes the code longer than the original expression!

So just get rid of it. This also shrinks <linux/sched.h> a bit.

Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
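To make the conversion pattern concrete, here is a small stand-alone
illustration (ordinary user-space C with a mocked-up task_struct, not
kernel code): the wrapper was nothing more than '&(tsk)->cpus_allowed',
so every call site in the hunks below simply open-codes that
address-of expression.

        #include <assert.h>
        #include <stdio.h>

        /* Mock-up for illustration only -- the real cpumask_t and
         * task_struct live in the kernel headers touched by this patch. */
        typedef struct { unsigned long bits[2]; } cpumask_t;
        struct task_struct { int nr_cpus_allowed; cpumask_t cpus_allowed; };

        /* The wrapper removed by this patch: */
        #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)

        int main(void)
        {
                struct task_struct p = { .nr_cpus_allowed = 4 };

                /* Old spelling and new spelling name the same object: */
                assert(tsk_cpus_allowed(&p) == &p.cpus_allowed);
                printf("wrapper and open-coded form are identical\n");
                return 0;
        }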
-rw-r--r--  arch/powerpc/kernel/smp.c                     2
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c     2
-rw-r--r--  arch/sparc/kernel/sysfs.c                     2
-rw-r--r--  drivers/cpufreq/sparc-us2e-cpufreq.c          4
-rw-r--r--  drivers/cpufreq/sparc-us3-cpufreq.c           4
-rw-r--r--  drivers/infiniband/hw/hfi1/affinity.c         2
-rw-r--r--  drivers/infiniband/hw/hfi1/sdma.c             2
-rw-r--r--  include/linux/sched.h                         3
-rw-r--r--  kernel/sched/core.c                           20
-rw-r--r--  kernel/sched/cpudeadline.c                    4
-rw-r--r--  kernel/sched/cpupri.c                         4
-rw-r--r--  kernel/sched/deadline.c                       7
-rw-r--r--  kernel/sched/fair.c                           25
-rw-r--r--  kernel/sched/rt.c                             5
-rw-r--r--  lib/smp_processor_id.c                        2
-rw-r--r--  samples/trace_events/trace-events-sample.c    2
16 files changed, 42 insertions, 48 deletions
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 573fb3a461b5..21fdf02583fe 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -795,7 +795,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
          * se we pin us down to CPU 0 for a short while
          */
         alloc_cpumask_var(&old_mask, GFP_NOWAIT);
-        cpumask_copy(old_mask, tsk_cpus_allowed(current));
+        cpumask_copy(old_mask, &current->cpus_allowed);
         set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));
 
         if (smp_ops && smp_ops->setup_cpu)
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 460f5f31d5cb..9b543df210fb 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -140,7 +140,7 @@ void __spu_update_sched_info(struct spu_context *ctx)
          * runqueue. The context will be rescheduled on the proper node
          * if it is timesliced or preempted.
          */
-        cpumask_copy(&ctx->cpus_allowed, tsk_cpus_allowed(current));
+        cpumask_copy(&ctx->cpus_allowed, &current->cpus_allowed);
 
         /* Save the current cpu id for spu interrupt routing. */
         ctx->last_ran = raw_smp_processor_id();
diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
index 4808b6d23455..d63fc613e7a9 100644
--- a/arch/sparc/kernel/sysfs.c
+++ b/arch/sparc/kernel/sysfs.c
@@ -106,7 +106,7 @@ static unsigned long run_on_cpu(unsigned long cpu,
         cpumask_t old_affinity;
         unsigned long ret;
 
-        cpumask_copy(&old_affinity, tsk_cpus_allowed(current));
+        cpumask_copy(&old_affinity, &current->cpus_allowed);
         /* should return -EINVAL to userspace */
         if (set_cpus_allowed_ptr(current, cpumask_of(cpu)))
                 return 0;
diff --git a/drivers/cpufreq/sparc-us2e-cpufreq.c b/drivers/cpufreq/sparc-us2e-cpufreq.c
index b73feeb666f9..35ddb6da93aa 100644
--- a/drivers/cpufreq/sparc-us2e-cpufreq.c
+++ b/drivers/cpufreq/sparc-us2e-cpufreq.c
@@ -234,7 +234,7 @@ static unsigned int us2e_freq_get(unsigned int cpu)
         cpumask_t cpus_allowed;
         unsigned long clock_tick, estar;
 
-        cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
+        cpumask_copy(&cpus_allowed, &current->cpus_allowed);
         set_cpus_allowed_ptr(current, cpumask_of(cpu));
 
         clock_tick = sparc64_get_clock_tick(cpu) / 1000;
@@ -252,7 +252,7 @@ static int us2e_freq_target(struct cpufreq_policy *policy, unsigned int index)
         unsigned long clock_tick, divisor, old_divisor, estar;
         cpumask_t cpus_allowed;
 
-        cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
+        cpumask_copy(&cpus_allowed, &current->cpus_allowed);
         set_cpus_allowed_ptr(current, cpumask_of(cpu));
 
         new_freq = clock_tick = sparc64_get_clock_tick(cpu) / 1000;
diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
index 9bb42ba50efa..a8d86a449ca1 100644
--- a/drivers/cpufreq/sparc-us3-cpufreq.c
+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
@@ -82,7 +82,7 @@ static unsigned int us3_freq_get(unsigned int cpu)
         unsigned long reg;
         unsigned int ret;
 
-        cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
+        cpumask_copy(&cpus_allowed, &current->cpus_allowed);
         set_cpus_allowed_ptr(current, cpumask_of(cpu));
 
         reg = read_safari_cfg();
@@ -99,7 +99,7 @@ static int us3_freq_target(struct cpufreq_policy *policy, unsigned int index)
         unsigned long new_bits, new_freq, reg;
         cpumask_t cpus_allowed;
 
-        cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
+        cpumask_copy(&cpus_allowed, &current->cpus_allowed);
         set_cpus_allowed_ptr(current, cpumask_of(cpu));
 
         new_freq = sparc64_get_clock_tick(cpu) / 1000;
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index 7a3d906b3671..e2cd2cd3b28a 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -576,7 +576,7 @@ int hfi1_get_proc_affinity(int node)
         struct hfi1_affinity_node *entry;
         cpumask_var_t diff, hw_thread_mask, available_mask, intrs_mask;
         const struct cpumask *node_mask,
-                *proc_mask = tsk_cpus_allowed(current);
+                *proc_mask = &current->cpus_allowed;
         struct hfi1_affinity_node_list *affinity = &node_affinity;
         struct cpu_mask_set *set = &affinity->proc;
 
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 1d81cac1fa6c..5cde1ecda0fe 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -856,7 +856,7 @@ struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
 {
         struct sdma_rht_node *rht_node;
         struct sdma_engine *sde = NULL;
-        const struct cpumask *current_mask = tsk_cpus_allowed(current);
+        const struct cpumask *current_mask = &current->cpus_allowed;
         unsigned long cpu_id;
 
         /*
diff --git a/include/linux/sched.h b/include/linux/sched.h
index df42cac04243..6d1cc20cc477 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1995,9 +1995,6 @@ static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
 }
 #endif
 
-/* Future-safe accessor for struct task_struct's cpus_allowed. */
-#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
-
 static inline int tsk_nr_cpus_allowed(struct task_struct *p)
 {
         return p->nr_cpus_allowed;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 2acdf19c5f7c..ef5bbf760a08 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -981,7 +981,7 @@ static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int dest_
                 return rq;
 
         /* Affinity changed (again). */
-        if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
+        if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
                 return rq;
 
         rq = move_queued_task(rq, p, dest_cpu);
@@ -1259,10 +1259,10 @@ static int migrate_swap_stop(void *data)
         if (task_cpu(arg->src_task) != arg->src_cpu)
                 goto unlock;
 
-        if (!cpumask_test_cpu(arg->dst_cpu, tsk_cpus_allowed(arg->src_task)))
+        if (!cpumask_test_cpu(arg->dst_cpu, &arg->src_task->cpus_allowed))
                 goto unlock;
 
-        if (!cpumask_test_cpu(arg->src_cpu, tsk_cpus_allowed(arg->dst_task)))
+        if (!cpumask_test_cpu(arg->src_cpu, &arg->dst_task->cpus_allowed))
                 goto unlock;
 
         __migrate_swap_task(arg->src_task, arg->dst_cpu);
@@ -1303,10 +1303,10 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p)
         if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
                 goto out;
 
-        if (!cpumask_test_cpu(arg.dst_cpu, tsk_cpus_allowed(arg.src_task)))
+        if (!cpumask_test_cpu(arg.dst_cpu, &arg.src_task->cpus_allowed))
                 goto out;
 
-        if (!cpumask_test_cpu(arg.src_cpu, tsk_cpus_allowed(arg.dst_task)))
+        if (!cpumask_test_cpu(arg.src_cpu, &arg.dst_task->cpus_allowed))
                 goto out;
 
         trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
@@ -1490,14 +1490,14 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
                 for_each_cpu(dest_cpu, nodemask) {
                         if (!cpu_active(dest_cpu))
                                 continue;
-                        if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
+                        if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
                                 return dest_cpu;
                 }
         }
 
         for (;;) {
                 /* Any allowed, online CPU? */
-                for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
+                for_each_cpu(dest_cpu, &p->cpus_allowed) {
                         if (!(p->flags & PF_KTHREAD) && !cpu_active(dest_cpu))
                                 continue;
                         if (!cpu_online(dest_cpu))
@@ -1552,7 +1552,7 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
         if (tsk_nr_cpus_allowed(p) > 1)
                 cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
         else
-                cpu = cpumask_any(tsk_cpus_allowed(p));
+                cpu = cpumask_any(&p->cpus_allowed);
 
         /*
          * In order not to call set_task_cpu() on a blocking task we need
@@ -1564,7 +1564,7 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
          * [ this allows ->select_task() to simply return task_cpu(p) and
          *   not worry about this generic constraint ]
          */
-        if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) ||
+        if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
                      !cpu_online(cpu)))
                 cpu = select_fallback_rq(task_cpu(p), p);
 
@@ -5473,7 +5473,7 @@ int migrate_task_to(struct task_struct *p, int target_cpu)
         if (curr_cpu == target_cpu)
                 return 0;
 
-        if (!cpumask_test_cpu(target_cpu, tsk_cpus_allowed(p)))
+        if (!cpumask_test_cpu(target_cpu, &p->cpus_allowed))
                 return -EINVAL;
 
         /* TODO: This is not properly updating schedstats */
diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
index e73119013c53..fba235c7d026 100644
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -128,10 +128,10 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
         const struct sched_dl_entity *dl_se = &p->dl;
 
         if (later_mask &&
-            cpumask_and(later_mask, cp->free_cpus, tsk_cpus_allowed(p))) {
+            cpumask_and(later_mask, cp->free_cpus, &p->cpus_allowed)) {
                 best_cpu = cpumask_any(later_mask);
                 goto out;
-        } else if (cpumask_test_cpu(cpudl_maximum(cp), tsk_cpus_allowed(p)) &&
+        } else if (cpumask_test_cpu(cpudl_maximum(cp), &p->cpus_allowed) &&
                         dl_time_before(dl_se->deadline, cp->elements[0].dl)) {
                 best_cpu = cpudl_maximum(cp);
                 if (later_mask)
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index 11e9705bf937..981fcd7dc394 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -103,11 +103,11 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
                 if (skip)
                         continue;
 
-                if (cpumask_any_and(tsk_cpus_allowed(p), vec->mask) >= nr_cpu_ids)
+                if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
                         continue;
 
                 if (lowest_mask) {
-                        cpumask_and(lowest_mask, tsk_cpus_allowed(p), vec->mask);
+                        cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);
 
                         /*
                          * We have to ensure that we have at least one bit
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 27737f34757d..8e4d6e4e3ccc 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -252,7 +252,7 @@ static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p
                  * If we cannot preempt any rq, fall back to pick any
                  * online cpu.
                  */
-                cpu = cpumask_any_and(cpu_active_mask, tsk_cpus_allowed(p));
+                cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
                 if (cpu >= nr_cpu_ids) {
                         /*
                          * Fail to find any suitable cpu.
@@ -1235,7 +1235,7 @@ static void set_curr_task_dl(struct rq *rq)
 static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
 {
         if (!task_running(rq, p) &&
-            cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
+            cpumask_test_cpu(cpu, &p->cpus_allowed))
                 return 1;
         return 0;
 }
@@ -1384,8 +1384,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
                 /* Retry if something changed. */
                 if (double_lock_balance(rq, later_rq)) {
                         if (unlikely(task_rq(task) != rq ||
-                                     !cpumask_test_cpu(later_rq->cpu,
-                                                       tsk_cpus_allowed(task)) ||
+                                     !cpumask_test_cpu(later_rq->cpu, &task->cpus_allowed) ||
                                      task_running(rq, task) ||
                                      !dl_task(task) ||
                                      !task_on_rq_queued(task))) {
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 274c747a01ce..3b60d73ab290 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1551,7 +1551,7 @@ static void task_numa_compare(struct task_numa_env *env,
          */
         if (cur) {
                 /* Skip this swap candidate if cannot move to the source cpu */
-                if (!cpumask_test_cpu(env->src_cpu, tsk_cpus_allowed(cur)))
+                if (!cpumask_test_cpu(env->src_cpu, &cur->cpus_allowed))
                         goto unlock;
 
                 /*
@@ -1661,7 +1661,7 @@ static void task_numa_find_cpu(struct task_numa_env *env,
 
         for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
                 /* Skip this CPU if the source task cannot migrate */
-                if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(env->p)))
+                if (!cpumask_test_cpu(cpu, &env->p->cpus_allowed))
                         continue;
 
                 env->dst_cpu = cpu;
@@ -5458,7 +5458,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 
                 /* Skip over this group if it has no CPUs allowed */
                 if (!cpumask_intersects(sched_group_cpus(group),
-                                        tsk_cpus_allowed(p)))
+                                        &p->cpus_allowed))
                         continue;
 
                 local_group = cpumask_test_cpu(this_cpu,
@@ -5578,7 +5578,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
                 return cpumask_first(sched_group_cpus(group));
 
         /* Traverse only the allowed CPUs */
-        for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
+        for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
                 if (idle_cpu(i)) {
                         struct rq *rq = cpu_rq(i);
                         struct cpuidle_state *idle = idle_get_state(rq);
@@ -5717,7 +5717,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int
         if (!test_idle_cores(target, false))
                 return -1;
 
-        cpumask_and(cpus, sched_domain_span(sd), tsk_cpus_allowed(p));
+        cpumask_and(cpus, sched_domain_span(sd), &p->cpus_allowed);
 
         for_each_cpu_wrap(core, cpus, target, wrap) {
                 bool idle = true;
@@ -5751,7 +5751,7 @@ static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int t
                 return -1;
 
         for_each_cpu(cpu, cpu_smt_mask(target)) {
-                if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
+                if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
                         continue;
                 if (idle_cpu(cpu))
                         return cpu;
@@ -5803,7 +5803,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
         time = local_clock();
 
         for_each_cpu_wrap(cpu, sched_domain_span(sd), target, wrap) {
-                if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
+                if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
                         continue;
                 if (idle_cpu(cpu))
                         break;
@@ -5958,7 +5958,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
         if (sd_flag & SD_BALANCE_WAKE) {
                 record_wakee(p);
                 want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu)
-                              && cpumask_test_cpu(cpu, tsk_cpus_allowed(p));
+                              && cpumask_test_cpu(cpu, &p->cpus_allowed);
         }
 
         rcu_read_lock();
@@ -6698,7 +6698,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
         if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
                 return 0;
 
-        if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
+        if (!cpumask_test_cpu(env->dst_cpu, &p->cpus_allowed)) {
                 int cpu;
 
                 schedstat_inc(p->se.statistics.nr_failed_migrations_affine);
@@ -6718,7 +6718,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
 
                 /* Prevent to re-select dst_cpu via env's cpus */
                 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
-                        if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) {
+                        if (cpumask_test_cpu(cpu, &p->cpus_allowed)) {
                                 env->flags |= LBF_DST_PINNED;
                                 env->new_dst_cpu = cpu;
                                 break;
@@ -7252,7 +7252,7 @@ check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
 
 /*
  * Group imbalance indicates (and tries to solve) the problem where balancing
- * groups is inadequate due to tsk_cpus_allowed() constraints.
+ * groups is inadequate due to ->cpus_allowed constraints.
  *
  * Imagine a situation of two groups of 4 cpus each and 4 tasks each with a
  * cpumask covering 1 cpu of the first group and 3 cpus of the second group.
@@ -8211,8 +8211,7 @@ more_balance:
                          * if the curr task on busiest cpu can't be
                          * moved to this_cpu
                          */
-                        if (!cpumask_test_cpu(this_cpu,
-                                        tsk_cpus_allowed(busiest->curr))) {
+                        if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) {
                                 raw_spin_unlock_irqrestore(&busiest->lock,
                                                             flags);
                                 env.flags |= LBF_ALL_PINNED;
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index e8836cfc4cdb..cbd356f63883 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1591,7 +1591,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
 {
         if (!task_running(rq, p) &&
-            cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
+            cpumask_test_cpu(cpu, &p->cpus_allowed))
                 return 1;
         return 0;
 }
@@ -1726,8 +1726,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
                  * Also make sure that it wasn't scheduled on its rq.
                  */
                 if (unlikely(task_rq(task) != rq ||
-                             !cpumask_test_cpu(lowest_rq->cpu,
-                                               tsk_cpus_allowed(task)) ||
+                             !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_allowed) ||
                              task_running(rq, task) ||
                              !rt_task(task) ||
                              !task_on_rq_queued(task))) {
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 1afec32de6f2..690d75b132fa 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -22,7 +22,7 @@ notrace static unsigned int check_preemption_disabled(const char *what1,
          * Kernel threads bound to a single CPU can safely use
          * smp_processor_id():
          */
-        if (cpumask_equal(tsk_cpus_allowed(current), cpumask_of(this_cpu)))
+        if (cpumask_equal(&current->cpus_allowed, cpumask_of(this_cpu)))
                 goto out;
 
         /*
diff --git a/samples/trace_events/trace-events-sample.c b/samples/trace_events/trace-events-sample.c
index 30e282d33d4d..bc7fcf010a5b 100644
--- a/samples/trace_events/trace-events-sample.c
+++ b/samples/trace_events/trace-events-sample.c
@@ -33,7 +33,7 @@ static void simple_thread_func(int cnt)
 
         /* Silly tracepoints */
         trace_foo_bar("hello", cnt, array, random_strings[len],
-                      tsk_cpus_allowed(current));
+                      &current->cpus_allowed);
 
         trace_foo_with_template_simple("HELLO", cnt);
 