-rw-r--r--   include/linux/kthread.h |  1
-rw-r--r--   include/linux/sched.h   |  7
-rw-r--r--   kernel/kthread.c        | 20
-rw-r--r--   kernel/sched/core.c     | 36
-rw-r--r--   kernel/workqueue.c      |  6
5 files changed, 52 insertions, 18 deletions
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 13d55206ccf6..869b21dcf503 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -38,6 +38,7 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
 })
 
 void kthread_bind(struct task_struct *k, unsigned int cpu);
+void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask);
 int kthread_stop(struct task_struct *k);
 bool kthread_should_stop(void);
 bool kthread_should_park(void);
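
The new kthread_bind_mask() is the cpumask analogue of kthread_bind(): it must run after kthread_create() but before the thread is first woken, because it relies on wait_task_inactive(). A minimal caller-side sketch, assuming an illustrative thread function and mask (my_thread_fn, my_mask, and spawn_bound_worker are hypothetical names, not part of this patch):

static int my_thread_fn(void *data)
{
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static struct task_struct *spawn_bound_worker(const struct cpumask *my_mask)
{
	struct task_struct *t = kthread_create(my_thread_fn, NULL, "my-worker");

	if (IS_ERR(t))
		return t;
	kthread_bind_mask(t, my_mask);	/* sets affinity + PF_NO_SETAFFINITY */
	wake_up_process(t);
	return t;
}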
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 44dca5b35de6..81bb4577274b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2203,13 +2203,6 @@ static inline void calc_load_enter_idle(void) { }
 static inline void calc_load_exit_idle(void) { }
 #endif /* CONFIG_NO_HZ_COMMON */
 
-#ifndef CONFIG_CPUMASK_OFFSTACK
-static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
-{
-	return set_cpus_allowed_ptr(p, &new_mask);
-}
-#endif
-
 /*
  * Do not use outside of architecture code which knows its limitations.
  *
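
The removed set_cpus_allowed() wrapper only ever passed its by-value cpumask_t on to set_cpus_allowed_ptr(), and was only defined for !CONFIG_CPUMASK_OFFSTACK builds, so any remaining caller converts mechanically. A sketch of the migration (p and new_mask are illustrative):

	/* before: mask passed by value */
	ret = set_cpus_allowed(p, new_mask);

	/* after: mask passed by pointer, works regardless of
	 * CONFIG_CPUMASK_OFFSTACK */
	ret = set_cpus_allowed_ptr(p, &new_mask);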
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 10e489c448fe..7c40a189becc 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -325,16 +325,30 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
 }
 EXPORT_SYMBOL(kthread_create_on_node);
 
-static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
+static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
 {
-	/* Must have done schedule() in kthread() before we set_task_cpu */
+	unsigned long flags;
+
 	if (!wait_task_inactive(p, state)) {
 		WARN_ON(1);
 		return;
 	}
+
 	/* It's safe because the task is inactive. */
-	do_set_cpus_allowed(p, cpumask_of(cpu));
+	raw_spin_lock_irqsave(&p->pi_lock, flags);
+	do_set_cpus_allowed(p, mask);
 	p->flags |= PF_NO_SETAFFINITY;
+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+}
+
+static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
+{
+	__kthread_bind_mask(p, cpumask_of(cpu), state);
+}
+
+void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
+{
+	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
 }
 
 /**
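
Taking p->pi_lock here is what pairs with the new lockdep_assert_held() in do_set_cpus_allowed() below: the setaffinity path reaches do_set_cpus_allowed() via task_rq_lock(), which acquires p->pi_lock first, so both writers of the cpumask and of PF_NO_SETAFFINITY are now serialized by the same lock. A rough sketch of the relevant part of task_rq_lock()'s locking in this era (abridged; the real function loops to handle migration):

static struct rq *task_rq_lock_sketch(struct task_struct *p, unsigned long *flags)
{
	struct rq *rq;

	raw_spin_lock_irqsave(&p->pi_lock, *flags);	/* same lock as above */
	rq = task_rq(p);
	raw_spin_lock(&rq->lock);
	return rq;
}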
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index ea6d74345e60..2e3b983da836 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1153,6 +1153,8 @@ static int migration_cpu_stop(void *data)
 
 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 {
+	lockdep_assert_held(&p->pi_lock);
+
 	if (p->sched_class->set_cpus_allowed)
 		p->sched_class->set_cpus_allowed(p, new_mask);
 
@@ -1169,7 +1171,8 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
  * task must not exit() & deallocate itself prematurely. The
  * call is not atomic; no spinlocks may be held.
  */
-int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
+static int __set_cpus_allowed_ptr(struct task_struct *p,
+				  const struct cpumask *new_mask, bool check)
 {
 	unsigned long flags;
 	struct rq *rq;
@@ -1178,6 +1181,15 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 
 	rq = task_rq_lock(p, &flags);
 
+	/*
+	 * Must re-check here, to close a race against __kthread_bind(),
+	 * sched_setaffinity() is not guaranteed to observe the flag.
+	 */
+	if (check && (p->flags & PF_NO_SETAFFINITY)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	if (cpumask_equal(&p->cpus_allowed, new_mask))
 		goto out;
 
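
Spelled out, the race this re-check closes: sched_setaffinity() tests PF_NO_SETAFFINITY early, before taking any locks, so the old code could interleave as below and let userspace override a fresh kthread binding. A sketch of the interleaving (comments only, not code from the tree):

/*
 *	CPU0 (sched_setaffinity)	CPU1 (__kthread_bind_mask)
 *
 *	p->flags & PF_NO_SETAFFINITY?
 *	-> still clear, proceed
 *					wait_task_inactive(p, state)
 *					pi_lock
 *					do_set_cpus_allowed(p, mask)
 *					p->flags |= PF_NO_SETAFFINITY
 *					pi_unlock
 *	task_rq_lock(p)   <- takes pi_lock
 *	do_set_cpus_allowed(p, user_mask)   <- overrides the bind
 *
 * With check == true, the flag set on CPU1 is observed under
 * task_rq_lock() and the call fails with -EINVAL instead.
 */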
@@ -1214,6 +1226,11 @@ out:
 
 	return ret;
 }
+
+int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
+{
+	return __set_cpus_allowed_ptr(p, new_mask, false);
+}
 EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
 
 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
@@ -1595,6 +1612,15 @@ static void update_avg(u64 *avg, u64 sample)
 	s64 diff = sample - *avg;
 	*avg += diff >> 3;
 }
+
+#else
+
+static inline int __set_cpus_allowed_ptr(struct task_struct *p,
+					 const struct cpumask *new_mask, bool check)
+{
+	return set_cpus_allowed_ptr(p, new_mask);
+}
+
 #endif /* CONFIG_SMP */
 
 static void
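
On !CONFIG_SMP builds the stub forwards to set_cpus_allowed_ptr() and drops the check argument: with a single CPU there is no affinity for userspace to meddle with. For reference, the UP set_cpus_allowed_ptr() of this era is itself a static inline in include/linux/sched.h that only validates the mask, roughly:

static inline int set_cpus_allowed_ptr(struct task_struct *p,
				       const struct cpumask *new_mask)
{
	if (!cpumask_test_cpu(0, new_mask))
		return -EINVAL;
	return 0;
}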
@@ -4340,7 +4366,7 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
 	}
 #endif
 again:
-	retval = set_cpus_allowed_ptr(p, new_mask);
+	retval = __set_cpus_allowed_ptr(p, new_mask, true);
 
 	if (!retval) {
 		cpuset_cpus_allowed(p, cpus_allowed);
@@ -4865,7 +4891,8 @@ void init_idle(struct task_struct *idle, int cpu)
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long flags;
 
-	raw_spin_lock_irqsave(&rq->lock, flags);
+	raw_spin_lock_irqsave(&idle->pi_lock, flags);
+	raw_spin_lock(&rq->lock);
 
 	__sched_fork(0, idle);
 	idle->state = TASK_RUNNING;
@@ -4891,7 +4918,8 @@ void init_idle(struct task_struct *idle, int cpu)
 #if defined(CONFIG_SMP)
 	idle->on_cpu = 1;
 #endif
-	raw_spin_unlock_irqrestore(&rq->lock, flags);
+	raw_spin_unlock(&rq->lock);
+	raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
 
 	/* Set the preempt count _outside_ the spinlocks! */
 	init_idle_preempt_count(idle, cpu);
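
init_idle() now nests the two locks in the same order task_rq_lock() uses, which is what lets the do_set_cpus_allowed(idle, cpumask_of(cpu)) call inside this function (not visible in these hunks) satisfy the new lockdep assertion. The ordering, reduced to a sketch:

	raw_spin_lock_irqsave(&idle->pi_lock, flags);	/* outer lock */
	raw_spin_lock(&rq->lock);			/* inner lock */
	/* ... set up the idle task, including its cpumask ... */
	raw_spin_unlock(&rq->lock);
	raw_spin_unlock_irqrestore(&idle->pi_lock, flags);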
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 4c4f06176f74..f5782d5fd196 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1714,9 +1714,7 @@ static struct worker *create_worker(struct worker_pool *pool)
 		goto fail;
 
 	set_user_nice(worker->task, pool->attrs->nice);
-
-	/* prevent userland from meddling with cpumask of workqueue workers */
-	worker->task->flags |= PF_NO_SETAFFINITY;
+	kthread_bind_mask(worker->task, pool->attrs->cpumask);
 
 	/* successful, attach the worker to the pool */
 	worker_attach_to_pool(worker, pool);
@@ -3856,7 +3854,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 		}
 
 		wq->rescuer = rescuer;
-		rescuer->task->flags |= PF_NO_SETAFFINITY;
+		kthread_bind_mask(rescuer->task, cpu_possible_mask);
 		wake_up_process(rescuer->task);
 	}
 
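
In both workqueue call sites, kthread_bind_mask() replaces an open-coded PF_NO_SETAFFINITY with a call that also installs the intended cpumask under pi_lock. Note that it must precede wake_up_process(), as it does above for the rescuer, because wait_task_inactive() inside the bind requires a thread that has never run. The required ordering, reduced to a sketch (rescuer_thread already exists in workqueue.c; error handling elided):

	rescuer->task = kthread_create(rescuer_thread, rescuer, "%s", wq->name);
	/* still TASK_UNINTERRUPTIBLE: safe to bind */
	kthread_bind_mask(rescuer->task, cpu_possible_mask);
	/* only now let it run, with mask and flag already in place */
	wake_up_process(rescuer->task);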