about summary refs log tree commit diff stats
path: root/kernel
diff options
context:
space:
mode:
authorDavid Rientjes <rientjes@google.com>2008-06-05 15:57:11 -0400
committerIngo Molnar <mingo@elte.hu>2008-06-10 06:26:16 -0400
commit9985b0bab332289f14837eff3c6e0bcc658b58f7 (patch)
tree8c4bcbc4348c473b14644dc4d371a45c0dabda53 /kernel
parent7def2be1dc679984f4c4fb3ef19a8a081b2454ec (diff)
sched: prevent bound kthreads from changing cpus_allowed
Kthreads that have called kthread_bind() are bound to specific cpus, so other tasks should not be able to change their cpus_allowed from under them. Otherwise, it is possible to move kthreads, such as the migration or software watchdog threads, so they are not allowed access to the cpu they work on. Cc: Peter Zijlstra <peterz@infradead.org> Cc: Paul Menage <menage@google.com> Cc: Paul Jackson <pj@sgi.com> Signed-off-by: David Rientjes <rientjes@google.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/cpuset.c14
-rw-r--r--kernel/kthread.c1
-rw-r--r--kernel/sched.c6
3 files changed, 20 insertions, 1 deletion
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 6090d18b58a9..b84354f4de36 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1190,6 +1190,15 @@ static int cpuset_can_attach(struct cgroup_subsys *ss,
1190 1190
1191 if (cpus_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)) 1191 if (cpus_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
1192 return -ENOSPC; 1192 return -ENOSPC;
1193 if (tsk->flags & PF_THREAD_BOUND) {
1194 cpumask_t mask;
1195
1196 mutex_lock(&callback_mutex);
1197 mask = cs->cpus_allowed;
1198 mutex_unlock(&callback_mutex);
1199 if (!cpus_equal(tsk->cpus_allowed, mask))
1200 return -EINVAL;
1201 }
1193 1202
1194 return security_task_setscheduler(tsk, 0, NULL); 1203 return security_task_setscheduler(tsk, 0, NULL);
1195} 1204}
@@ -1203,11 +1212,14 @@ static void cpuset_attach(struct cgroup_subsys *ss,
1203 struct mm_struct *mm; 1212 struct mm_struct *mm;
1204 struct cpuset *cs = cgroup_cs(cont); 1213 struct cpuset *cs = cgroup_cs(cont);
1205 struct cpuset *oldcs = cgroup_cs(oldcont); 1214 struct cpuset *oldcs = cgroup_cs(oldcont);
1215 int err;
1206 1216
1207 mutex_lock(&callback_mutex); 1217 mutex_lock(&callback_mutex);
1208 guarantee_online_cpus(cs, &cpus); 1218 guarantee_online_cpus(cs, &cpus);
1209 set_cpus_allowed_ptr(tsk, &cpus); 1219 err = set_cpus_allowed_ptr(tsk, &cpus);
1210 mutex_unlock(&callback_mutex); 1220 mutex_unlock(&callback_mutex);
1221 if (err)
1222 return;
1211 1223
1212 from = oldcs->mems_allowed; 1224 from = oldcs->mems_allowed;
1213 to = cs->mems_allowed; 1225 to = cs->mems_allowed;
diff --git a/kernel/kthread.c b/kernel/kthread.c
index bd1b9ea024e1..97747cdd37c9 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -180,6 +180,7 @@ void kthread_bind(struct task_struct *k, unsigned int cpu)
180 set_task_cpu(k, cpu); 180 set_task_cpu(k, cpu);
181 k->cpus_allowed = cpumask_of_cpu(cpu); 181 k->cpus_allowed = cpumask_of_cpu(cpu);
182 k->rt.nr_cpus_allowed = 1; 182 k->rt.nr_cpus_allowed = 1;
183 k->flags |= PF_THREAD_BOUND;
183} 184}
184EXPORT_SYMBOL(kthread_bind); 185EXPORT_SYMBOL(kthread_bind);
185 186
diff --git a/kernel/sched.c b/kernel/sched.c
index e9c24a128655..164fe7fe0d89 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5563,6 +5563,12 @@ int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask)
5563 goto out; 5563 goto out;
5564 } 5564 }
5565 5565
5566 if (unlikely((p->flags & PF_THREAD_BOUND) && p != current &&
5567 !cpus_equal(p->cpus_allowed, *new_mask))) {
5568 ret = -EINVAL;
5569 goto out;
5570 }
5571
5566 if (p->sched_class->set_cpus_allowed) 5572 if (p->sched_class->set_cpus_allowed)
5567 p->sched_class->set_cpus_allowed(p, new_mask); 5573 p->sched_class->set_cpus_allowed(p, new_mask);
5568 else { 5574 else {