| author | KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> | 2011-05-19 02:08:58 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2011-05-28 11:02:57 -0400 |
| commit | 1e1b6c511d1b23cb7c3b619d82fc7bd9f620565d | |
| tree | c50e88412c8b42264177dc125f74a30f9c7a82d9 | |
| parent | 1e876231785d82443a5ac8b6c660e9f51bc5dede | |
cpuset: Fix cpuset_cpus_allowed_fallback(), don't update tsk->rt.nr_cpus_allowed
The rule is: if we change tsk->cpus_allowed, we must also update
tsk->rt.nr_cpus_allowed; otherwise the RT scheduler may get confused.
(A sketch of the helper that keeps the two fields in sync follows the
sign-off tags below.)
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/4DD4B3FA.5060901@jp.fujitsu.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
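The invariant stated in the commit message is what the new do_set_cpus_allowed()
helper enforces. As a minimal sketch, mirroring the kernel/sched.c hunk further
down (including its check of p->sched_class), every affinity update now goes
through one place that refreshes the mask and the RT scheduler's cached CPU
count together:

```c
/*
 * Sketch of the helper introduced by this patch (see the kernel/sched.c
 * hunk below).  Callers should no longer open-code cpumask_copy() on
 * tsk->cpus_allowed; routing the update through this helper keeps
 * tsk->rt.nr_cpus_allowed consistent with the mask.
 */
void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
	if (p->sched_class && p->sched_class->set_cpus_allowed)
		/* Let the scheduling class perform both updates its own way. */
		p->sched_class->set_cpus_allowed(p, new_mask);
	else {
		/* Generic fallback: update the mask and the count in one step. */
		cpumask_copy(&p->cpus_allowed, new_mask);
		p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
	}
}
```

The cpuset fallback paths, kthread_bind(), and init_idle() in the diff below are
all converted to call this helper instead of copying the cpumask directly.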
| file | lines changed |
|---|---|
| include/linux/cpuset.h | 2 |
| include/linux/sched.h | 7 |
| kernel/cpuset.c | 4 |
| kernel/kthread.c | 4 |
| kernel/sched.c | 19 |

5 files changed, 24 insertions(+), 12 deletions(-)
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index f20eb8f16025..e9eaec522655 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -146,7 +146,7 @@ static inline void cpuset_cpus_allowed(struct task_struct *p,
 
 static inline int cpuset_cpus_allowed_fallback(struct task_struct *p)
 {
-	cpumask_copy(&p->cpus_allowed, cpu_possible_mask);
+	do_set_cpus_allowed(p, cpu_possible_mask);
 	return cpumask_any(cpu_active_mask);
 }
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index dc8871295a5a..8da84b7bc1b8 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1841,9 +1841,16 @@ static inline void rcu_copy_process(struct task_struct *p)
 #endif
 
 #ifdef CONFIG_SMP
+extern void do_set_cpus_allowed(struct task_struct *p,
+				const struct cpumask *new_mask);
+
 extern int set_cpus_allowed_ptr(struct task_struct *p,
 				const struct cpumask *new_mask);
 #else
+static inline void do_set_cpus_allowed(struct task_struct *p,
+				       const struct cpumask *new_mask)
+{
+}
 static inline int set_cpus_allowed_ptr(struct task_struct *p,
 				       const struct cpumask *new_mask)
 {
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 1ceeb049c827..9c9b7545c810 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2190,7 +2190,7 @@ int cpuset_cpus_allowed_fallback(struct task_struct *tsk)
 	rcu_read_lock();
 	cs = task_cs(tsk);
 	if (cs)
-		cpumask_copy(&tsk->cpus_allowed, cs->cpus_allowed);
+		do_set_cpus_allowed(tsk, cs->cpus_allowed);
 	rcu_read_unlock();
 
 	/*
@@ -2217,7 +2217,7 @@ int cpuset_cpus_allowed_fallback(struct task_struct *tsk)
 	 * Like above we can temporary set any mask and rely on
 	 * set_cpus_allowed_ptr() as synchronization point.
 	 */
-	cpumask_copy(&tsk->cpus_allowed, cpu_possible_mask);
+	do_set_cpus_allowed(tsk, cpu_possible_mask);
 	cpu = cpumask_any(cpu_active_mask);
 }
 
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 3b34d2732bce..4ba7cccb4994 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -202,8 +202,8 @@ void kthread_bind(struct task_struct *p, unsigned int cpu)
 		return;
 	}
 
-	p->cpus_allowed = cpumask_of_cpu(cpu);
-	p->rt.nr_cpus_allowed = 1;
+	/* It's safe because the task is inactive. */
+	do_set_cpus_allowed(p, cpumask_of(cpu));
 	p->flags |= PF_THREAD_BOUND;
 }
 EXPORT_SYMBOL(kthread_bind);
diff --git a/kernel/sched.c b/kernel/sched.c
index a80ee911900e..cbb3a0eee58e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5860,7 +5860,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	idle->state = TASK_RUNNING;
 	idle->se.exec_start = sched_clock();
 
-	cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
+	do_set_cpus_allowed(idle, cpumask_of(cpu));
 	/*
 	 * We're having a chicken and egg problem, even though we are
 	 * holding rq->lock, the cpu isn't yet set to this cpu so the
@@ -5948,6 +5948,16 @@ static inline void sched_init_granularity(void)
 }
 
 #ifdef CONFIG_SMP
+void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+{
+	if (p->sched_class && p->sched_class->set_cpus_allowed)
+		p->sched_class->set_cpus_allowed(p, new_mask);
+	else {
+		cpumask_copy(&p->cpus_allowed, new_mask);
+		p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
+	}
+}
+
 /*
  * This is how migration works:
  *
@@ -5993,12 +6003,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 		goto out;
 	}
 
-	if (p->sched_class->set_cpus_allowed)
-		p->sched_class->set_cpus_allowed(p, new_mask);
-	else {
-		cpumask_copy(&p->cpus_allowed, new_mask);
-		p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
-	}
+	do_set_cpus_allowed(p, new_mask);
 
 	/* Can the task run on the task's current CPU? If so, we're done */
 	if (cpumask_test_cpu(task_cpu(p), new_mask))
