 include/linux/sched.h |  1 +
 kernel/cpuset.c       | 14 +++++++++++++-
 kernel/kthread.c      |  1 +
 kernel/sched.c        |  6 ++++++
 4 files changed, 21 insertions(+), 1 deletion(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d25acf600a32..2db1485f865d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1486,6 +1486,7 @@ static inline void put_task_struct(struct task_struct *t)
 #define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
 #define PF_SPREAD_PAGE	0x01000000	/* Spread page cache over cpuset */
 #define PF_SPREAD_SLAB	0x02000000	/* Spread some slab caches over cpuset */
+#define PF_THREAD_BOUND	0x04000000	/* Thread bound to specific cpu */
 #define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
 #define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
 #define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezeable */
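
PF_THREAD_BOUND takes the previously unused 0x04000000 bit in task_struct::flags, alongside the other PF_* per-task flags. As an illustration only (not part of the patch; the helper names here are hypothetical), such a flag is set and tested with plain bit operations on tsk->flags:

	#include <linux/sched.h>	/* struct task_struct, PF_* flags */

	/* Hypothetical helpers, for illustration only. */
	static inline void mark_thread_bound(struct task_struct *tsk)
	{
		tsk->flags |= PF_THREAD_BOUND;	/* what kthread_bind() does below */
	}

	static inline int thread_is_bound(const struct task_struct *tsk)
	{
		return (tsk->flags & PF_THREAD_BOUND) != 0;
	}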
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 6090d18b58a9..b84354f4de36 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1190,6 +1190,15 @@ static int cpuset_can_attach(struct cgroup_subsys *ss,
 
 	if (cpus_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
 		return -ENOSPC;
+	if (tsk->flags & PF_THREAD_BOUND) {
+		cpumask_t mask;
+
+		mutex_lock(&callback_mutex);
+		mask = cs->cpus_allowed;
+		mutex_unlock(&callback_mutex);
+		if (!cpus_equal(tsk->cpus_allowed, mask))
+			return -EINVAL;
+	}
 
 	return security_task_setscheduler(tsk, 0, NULL);
 }
@@ -1203,11 +1212,14 @@ static void cpuset_attach(struct cgroup_subsys *ss,
 	struct mm_struct *mm;
 	struct cpuset *cs = cgroup_cs(cont);
 	struct cpuset *oldcs = cgroup_cs(oldcont);
+	int err;
 
 	mutex_lock(&callback_mutex);
 	guarantee_online_cpus(cs, &cpus);
-	set_cpus_allowed_ptr(tsk, &cpus);
+	err = set_cpus_allowed_ptr(tsk, &cpus);
 	mutex_unlock(&callback_mutex);
+	if (err)
+		return;
 
 	from = oldcs->mems_allowed;
 	to = cs->mems_allowed;
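
Taken together, the two hunks above make cpuset_can_attach() reject moving a PF_THREAD_BOUND task into a cpuset whose cpus_allowed differs from the task's current affinity, and make cpuset_attach() bail out if set_cpus_allowed_ptr() fails rather than migrating the task's memory anyway. A rough sketch of that policy check in isolation (bound_task_may_attach() is a hypothetical name; the locking mirrors the hunk above):

	/* Sketch only: a PF_THREAD_BOUND task may only be attached to a cpuset
	 * whose cpus_allowed is exactly its current mask, so the attach never
	 * changes where the bound kthread may run. */
	static int bound_task_may_attach(struct task_struct *tsk, struct cpuset *cs)
	{
		cpumask_t mask;

		if (!(tsk->flags & PF_THREAD_BOUND))
			return 1;		/* ordinary tasks: no extra restriction here */

		mutex_lock(&callback_mutex);	/* take a stable snapshot of cpus_allowed */
		mask = cs->cpus_allowed;
		mutex_unlock(&callback_mutex);

		return cpus_equal(tsk->cpus_allowed, mask);
	}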
diff --git a/kernel/kthread.c b/kernel/kthread.c
index bd1b9ea024e1..97747cdd37c9 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -180,6 +180,7 @@ void kthread_bind(struct task_struct *k, unsigned int cpu)
 	set_task_cpu(k, cpu);
 	k->cpus_allowed = cpumask_of_cpu(cpu);
 	k->rt.nr_cpus_allowed = 1;
+	k->flags |= PF_THREAD_BOUND;
 }
 EXPORT_SYMBOL(kthread_bind);
 
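
kthread_bind() already pins the kthread by writing cpus_allowed and rt.nr_cpus_allowed directly; the added line also marks it PF_THREAD_BOUND so that later affinity changes from other tasks are refused. A hedged sketch of the usual creator-side pattern of this era (my_thread_fn, start_bound_thread() and the thread name are made up for illustration):

	#include <linux/kthread.h>
	#include <linux/sched.h>

	static int my_thread_fn(void *unused)
	{
		while (!kthread_should_stop())
			schedule_timeout_interruptible(HZ);	/* placeholder work loop */
		return 0;
	}

	/* Create a kthread pinned to 'cpu'; after kthread_bind() it carries
	 * PF_THREAD_BOUND, so other tasks can no longer re-affine it. */
	static struct task_struct *start_bound_thread(unsigned int cpu)
	{
		struct task_struct *p;

		p = kthread_create(my_thread_fn, NULL, "bound_thread/%u", cpu);
		if (!IS_ERR(p)) {
			kthread_bind(p, cpu);
			wake_up_process(p);
		}
		return p;
	}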
diff --git a/kernel/sched.c b/kernel/sched.c
index e9c24a128655..164fe7fe0d89 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5563,6 +5563,12 @@ int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask)
 		goto out;
 	}
 
+	if (unlikely((p->flags & PF_THREAD_BOUND) && p != current &&
+		     !cpus_equal(p->cpus_allowed, *new_mask))) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	if (p->sched_class->set_cpus_allowed)
 		p->sched_class->set_cpus_allowed(p, new_mask);
 	else {
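
With this check, set_cpus_allowed_ptr() returns -EINVAL when another task tries to change a PF_THREAD_BOUND task's mask to anything other than its current one; the bound task itself (p == current) may still change its own affinity. A minimal sketch of what a caller now observes (the caller and other_cpu are hypothetical; assume p was pinned via kthread_bind()):

	#include <linux/sched.h>	/* set_cpus_allowed_ptr(), cpumask_of_cpu() */

	/* Sketch only: attempt to re-affine someone else's bound kthread. */
	static int try_to_reaffine(struct task_struct *p, unsigned int other_cpu)
	{
		cpumask_t new_mask;

		new_mask = cpumask_of_cpu(other_cpu);
		/* With this patch, this returns -EINVAL when p is PF_THREAD_BOUND,
		 * the caller is not p, and new_mask differs from p->cpus_allowed. */
		return set_cpus_allowed_ptr(p, &new_mask);
	}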