author	Peter Zijlstra <peterz@infradead.org>	2017-07-25 12:58:21 -0400
committer	Ingo Molnar <mingo@kernel.org>	2018-05-31 06:24:24 -0400
commit	175f0e25abeaa2218d431141ce19cf1de70fa82d (patch)
tree	bbdd1a283a4fb22431e3be75169a7cf4599f0aee /kernel/sched
parent	786b71f5b754273ccef6d9462e52062b3e1f9877 (diff)
sched/core: Fix rules for running on online && !active CPUs
As already enforced by the WARN() in __set_cpus_allowed_ptr(), the rules for running on an online && !active CPU are stricter than just being a kthread: you need to be a per-CPU kthread. If you're not strictly per-CPU, you have better CPUs to run on and don't need the partially booted one to get your work done.

The exception is to allow smpboot threads to bootstrap the CPU itself and get kernel 'services' initialized before we allow userspace on it.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Fixes: 955dbdf4ce87 ("sched: Allow migrating kthreads into online but inactive CPUs")
Link: http://lkml.kernel.org/r/20170725165821.cejhb7v2s3kecems@hirez.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
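In short, the patch replaces two open-coded PF_KTHREAD/cpu_online()/cpu_active() checks with a single predicate. A condensed view of the two helpers introduced in the diff below; the comments are annotations added for this summary, not part of the original patch:

/* A per-CPU kthread: a kernel thread pinned to exactly one CPU. */
static inline bool is_per_cpu_kthread(struct task_struct *p)
{
	if (!(p->flags & PF_KTHREAD))
		return false;		/* user tasks never qualify */

	if (p->nr_cpus_allowed != 1)
		return false;		/* must be bound to a single CPU */

	return true;
}

/*
 * The rule being enforced: per-CPU kthreads (e.g. the smpboot threads
 * that bring a CPU up) may run on an online && !active CPU; every
 * other task needs the CPU to be fully active.
 */
static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
{
	if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
		return false;		/* outside the task's affinity mask */

	if (is_per_cpu_kthread(p))
		return cpu_online(cpu);	/* online is sufficient */

	return cpu_active(cpu);		/* everyone else needs active */
}

Both __migrate_task() and select_fallback_rq() are converted to call this predicate, so the online && !active corner case is decided in one place.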
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/core.c	42

1 file changed, 30 insertions(+), 12 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 092f7c4de903..1c58f54b9114 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -881,6 +881,33 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 }
 
 #ifdef CONFIG_SMP
+
+static inline bool is_per_cpu_kthread(struct task_struct *p)
+{
+	if (!(p->flags & PF_KTHREAD))
+		return false;
+
+	if (p->nr_cpus_allowed != 1)
+		return false;
+
+	return true;
+}
+
+/*
+ * Per-CPU kthreads are allowed to run on !active && online CPUs, see
+ * __set_cpus_allowed_ptr() and select_fallback_rq().
+ */
+static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
+{
+	if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
+		return false;
+
+	if (is_per_cpu_kthread(p))
+		return cpu_online(cpu);
+
+	return cpu_active(cpu);
+}
+
 /*
  * This is how migration works:
  *
@@ -938,16 +965,8 @@ struct migration_arg {
 static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
 				 struct task_struct *p, int dest_cpu)
 {
-	if (p->flags & PF_KTHREAD) {
-		if (unlikely(!cpu_online(dest_cpu)))
-			return rq;
-	} else {
-		if (unlikely(!cpu_active(dest_cpu)))
-			return rq;
-	}
-
 	/* Affinity changed (again). */
-	if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
+	if (!is_cpu_allowed(p, dest_cpu))
 		return rq;
 
 	update_rq_clock(rq);
@@ -1476,10 +1495,9 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
 	for (;;) {
 		/* Any allowed, online CPU? */
 		for_each_cpu(dest_cpu, &p->cpus_allowed) {
-			if (!(p->flags & PF_KTHREAD) && !cpu_active(dest_cpu))
-				continue;
-			if (!cpu_online(dest_cpu))
+			if (!is_cpu_allowed(p, dest_cpu))
 				continue;
+
 			goto out;
 		}
 