Diffstat (limited to 'kernel')
 kernel/cpuset.c  |  4 ++--
 kernel/kthread.c |  4 ++--
 kernel/sched.c   | 19 ++++++++++++-------
 3 files changed, 16 insertions(+), 11 deletions(-)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 1ceeb049c827..9c9b7545c810 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2190,7 +2190,7 @@ int cpuset_cpus_allowed_fallback(struct task_struct *tsk)
 	rcu_read_lock();
 	cs = task_cs(tsk);
 	if (cs)
-		cpumask_copy(&tsk->cpus_allowed, cs->cpus_allowed);
+		do_set_cpus_allowed(tsk, cs->cpus_allowed);
 	rcu_read_unlock();
 
 	/*
@@ -2217,7 +2217,7 @@ int cpuset_cpus_allowed_fallback(struct task_struct *tsk)
 	 * Like above we can temporary set any mask and rely on
 	 * set_cpus_allowed_ptr() as synchronization point.
 	 */
-	cpumask_copy(&tsk->cpus_allowed, cpu_possible_mask);
+	do_set_cpus_allowed(tsk, cpu_possible_mask);
 	cpu = cpumask_any(cpu_active_mask);
 }
 
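Both fallback paths above previously wrote tsk->cpus_allowed directly and left tsk->rt.nr_cpus_allowed stale; the RT scheduler uses that cached weight to decide, among other things, whether a task is migratable, so the two fields must change together. Routing the update through do_set_cpus_allowed(), added below in kernel/sched.c, keeps them in sync.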
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 3b34d2732bce..4ba7cccb4994 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -202,8 +202,8 @@ void kthread_bind(struct task_struct *p, unsigned int cpu)
 		return;
 	}
 
-	p->cpus_allowed = cpumask_of_cpu(cpu);
-	p->rt.nr_cpus_allowed = 1;
+	/* It's safe because the task is inactive. */
+	do_set_cpus_allowed(p, cpumask_of(cpu));
 	p->flags |= PF_THREAD_BOUND;
 }
 EXPORT_SYMBOL(kthread_bind);
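kthread_bind() can rewrite the mask without taking the runqueue lock because the target thread has never run; the new comment records that assumption. A minimal sketch of the usual calling pattern, assuming a freshly created (and therefore still inactive) kthread — the function and thread names here are hypothetical:

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static int pinned_worker(void *data)
{
	/* Idle until someone calls kthread_stop(). */
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	return 0;
}

static struct task_struct *start_pinned_worker(unsigned int cpu)
{
	struct task_struct *t;

	/* kthread_create() returns the thread created but not yet
	 * started, so it is still inactive when kthread_bind()
	 * rewrites its allowed mask. */
	t = kthread_create(pinned_worker, NULL, "pinned/%u", cpu);
	if (!IS_ERR(t)) {
		kthread_bind(t, cpu);
		wake_up_process(t);
	}
	return t;
}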
diff --git a/kernel/sched.c b/kernel/sched.c
index a80ee911900e..cbb3a0eee58e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5860,7 +5860,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	idle->state = TASK_RUNNING;
 	idle->se.exec_start = sched_clock();
 
-	cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
+	do_set_cpus_allowed(idle, cpumask_of(cpu));
 	/*
 	 * We're having a chicken and egg problem, even though we are
 	 * holding rq->lock, the cpu isn't yet set to this cpu so the
@@ -5948,6 +5948,16 @@ static inline void sched_init_granularity(void)
 }
 
 #ifdef CONFIG_SMP
+void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+{
+	if (p->sched_class && p->sched_class->set_cpus_allowed)
+		p->sched_class->set_cpus_allowed(p, new_mask);
+	else {
+		cpumask_copy(&p->cpus_allowed, new_mask);
+		p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
+	}
+}
+
 /*
  * This is how migration works:
  *
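do_set_cpus_allowed() is now the one place a task's allowed mask is rewritten: a scheduling class that supplies a set_cpus_allowed hook (the RT class does, so it can maintain its per-runqueue migration counts) gets first say; everything else falls through to the generic copy plus weight update. The p->sched_class check presumably guards the early-boot case, where init_idle() above can be reached before the idle task's sched_class has been assigned. Below is a runnable userspace model of the dispatch; all types and names are simplified stand-ins rather than the kernel's:

#include <stdio.h>

typedef unsigned long cpumask_t;	/* model: one bit per CPU */

struct task;

struct sched_class {
	/* Optional per-class override, as in the kernel's struct. */
	void (*set_cpus_allowed)(struct task *p, cpumask_t new_mask);
};

struct task {
	const struct sched_class *sched_class;
	cpumask_t cpus_allowed;
	unsigned int nr_cpus_allowed;	/* cached popcount of the mask */
};

static void model_do_set_cpus_allowed(struct task *p, cpumask_t new_mask)
{
	if (p->sched_class && p->sched_class->set_cpus_allowed) {
		/* Class knows best: let it update its own bookkeeping. */
		p->sched_class->set_cpus_allowed(p, new_mask);
	} else {
		/* Generic path: mask and cached weight change together. */
		p->cpus_allowed = new_mask;
		p->nr_cpus_allowed = (unsigned int)__builtin_popcountl(new_mask);
	}
}

int main(void)
{
	struct task t = { 0 };	/* no sched_class: generic path */

	model_do_set_cpus_allowed(&t, 0x5UL);	/* CPUs 0 and 2 */
	printf("mask=%#lx weight=%u\n", t.cpus_allowed, t.nr_cpus_allowed);
	return 0;
}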
@@ -5993,12 +6003,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 		goto out;
 	}
 
-	if (p->sched_class->set_cpus_allowed)
-		p->sched_class->set_cpus_allowed(p, new_mask);
-	else {
-		cpumask_copy(&p->cpus_allowed, new_mask);
-		p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
-	}
+	do_set_cpus_allowed(p, new_mask);
 
 	/* Can the task run on the task's current CPU? If so, we're done */
 	if (cpumask_test_cpu(task_cpu(p), new_mask))
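With the helper factored out, set_cpus_allowed_ptr() retains only the work that needs a live task: it takes the runqueue lock, rejects masks that miss cpu_active_mask, updates the bookkeeping through do_set_cpus_allowed(), and migrates the task if it is sitting on a now-disallowed CPU. Callers that can guarantee the task is inactive (kthread_bind()) or already hold rq->lock (init_idle()) call do_set_cpus_allowed() directly and skip that machinery. A hypothetical call site for the full path, pinning an already-running task:

#include <linux/cpumask.h>
#include <linux/sched.h>

/* Unlike a bare do_set_cpus_allowed(), this may migrate @p at once. */
static int pin_running_task(struct task_struct *p, unsigned int cpu)
{
	return set_cpus_allowed_ptr(p, cpumask_of(cpu));
}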