author    Linus Torvalds <torvalds@linux-foundation.org>    2018-06-03 12:01:41 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2018-06-03 12:01:41 -0400
commit    874cd339acdfe734b5418e36e3ad40fd4c573155 (patch)
tree      acf1d2aa1bf1b50c2d81d3138f7166410fa2492b
parent    26bdace74c857ce370ca23344e79b0b7cc17e9b3 (diff)
parent    595058b6675e4d2a70dcd867c84d922975f9d22b (diff)
Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Thomas Gleixner:

 - two patches addressing the problem that, under certain conditions, the scheduler allows user space tasks to be scheduled on CPUs which are not yet fully booted, which causes a few subtle and hard to debug issues

 - add a missing runqueue clock update in the deadline scheduler which triggers a warning under certain circumstances

 - fix a silly typo in the scheduler header file

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/headers: Fix typo
  sched/deadline: Fix missing clock update
  sched/core: Require cpu_active() in select_task_rq(), for user tasks
  sched/core: Fix rules for running on online && !active CPUs
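For orientation before the diffstat and per-file diffs, here is a minimal user-space sketch (not kernel code) of the placement rule the two cpu_active() fixes enforce. The struct layouts, field names, and the booting/active flags are illustrative stand-ins; only the decision logic mirrors the is_cpu_allowed() helper added in the core.c hunk below.

#include <stdbool.h>
#include <stdio.h>

/*
 * User-space sketch, not kernel code: models the rule the cpu_active()
 * patches enforce. Names are illustrative; only the decision logic
 * mirrors is_cpu_allowed().
 */
struct cpu_state {
	bool online;		/* CPU has booted */
	bool active;		/* CPU is fully up and open to the scheduler */
};

struct task {
	bool kthread;		/* PF_KTHREAD in the kernel */
	int nr_cpus_allowed;	/* width of the task's affinity mask */
	bool in_affinity_mask;	/* cpumask_test_cpu(cpu, &p->cpus_allowed) */
};

/* A kernel thread pinned to exactly one CPU. */
static bool is_per_cpu_kthread(const struct task *t)
{
	return t->kthread && t->nr_cpus_allowed == 1;
}

static bool is_cpu_allowed(const struct task *t, const struct cpu_state *c)
{
	if (!t->in_affinity_mask)
		return false;

	/* Per-CPU kthreads may run on online but not yet active CPUs... */
	if (is_per_cpu_kthread(t))
		return c->online;

	/* ...everything else, user tasks included, needs a fully active CPU. */
	return c->active;
}

int main(void)
{
	struct cpu_state booting = { .online = true, .active = false };
	struct task user   = { .kthread = false, .nr_cpus_allowed = 8, .in_affinity_mask = true };
	struct task percpu = { .kthread = true,  .nr_cpus_allowed = 1, .in_affinity_mask = true };

	printf("user task on a booting CPU:       %s\n",
	       is_cpu_allowed(&user, &booting) ? "allowed" : "rejected");   /* rejected */
	printf("per-CPU kthread on a booting CPU: %s\n",
	       is_cpu_allowed(&percpu, &booting) ? "allowed" : "rejected"); /* allowed */
	return 0;
}

The diffs below replace the scattered cpu_online()/cpu_active() checks in __migrate_task(), select_fallback_rq() and select_task_rq() with this single predicate.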
-rw-r--r--  kernel/sched/core.c      | 45
-rw-r--r--  kernel/sched/deadline.c  |  6
-rw-r--r--  kernel/sched/sched.h     |  2
3 files changed, 35 insertions, 18 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 092f7c4de903..211890edf37e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -881,6 +881,33 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 }
 
 #ifdef CONFIG_SMP
+
+static inline bool is_per_cpu_kthread(struct task_struct *p)
+{
+	if (!(p->flags & PF_KTHREAD))
+		return false;
+
+	if (p->nr_cpus_allowed != 1)
+		return false;
+
+	return true;
+}
+
+/*
+ * Per-CPU kthreads are allowed to run on !active && online CPUs, see
+ * __set_cpus_allowed_ptr() and select_fallback_rq().
+ */
+static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
+{
+	if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
+		return false;
+
+	if (is_per_cpu_kthread(p))
+		return cpu_online(cpu);
+
+	return cpu_active(cpu);
+}
+
 /*
  * This is how migration works:
  *
@@ -938,16 +965,8 @@ struct migration_arg {
 static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
 				 struct task_struct *p, int dest_cpu)
 {
-	if (p->flags & PF_KTHREAD) {
-		if (unlikely(!cpu_online(dest_cpu)))
-			return rq;
-	} else {
-		if (unlikely(!cpu_active(dest_cpu)))
-			return rq;
-	}
-
 	/* Affinity changed (again). */
-	if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
+	if (!is_cpu_allowed(p, dest_cpu))
 		return rq;
 
 	update_rq_clock(rq);
@@ -1476,10 +1495,9 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
 	for (;;) {
 		/* Any allowed, online CPU? */
 		for_each_cpu(dest_cpu, &p->cpus_allowed) {
-			if (!(p->flags & PF_KTHREAD) && !cpu_active(dest_cpu))
-				continue;
-			if (!cpu_online(dest_cpu))
+			if (!is_cpu_allowed(p, dest_cpu))
 				continue;
+
 			goto out;
 		}
 
@@ -1542,8 +1560,7 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
 	 * [ this allows ->select_task() to simply return task_cpu(p) and
 	 *   not worry about this generic constraint ]
 	 */
-	if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
-		     !cpu_online(cpu)))
+	if (unlikely(!is_cpu_allowed(p, cpu)))
 		cpu = select_fallback_rq(task_cpu(p), p);
 
 	return cpu;
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 1356afd1eeb6..fbfc3f1d368a 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1259,6 +1259,9 @@ static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
 
 	rq = task_rq_lock(p, &rf);
 
+	sched_clock_tick();
+	update_rq_clock(rq);
+
 	if (!dl_task(p) || p->state == TASK_DEAD) {
 		struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
 
@@ -1278,9 +1281,6 @@ static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
 	if (dl_se->dl_non_contending == 0)
 		goto unlock;
 
-	sched_clock_tick();
-	update_rq_clock(rq);
-
 	sub_running_bw(dl_se, &rq->dl);
 	dl_se->dl_non_contending = 0;
 unlock:
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 1f0a4bc6a39d..cb467c221b15 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -983,7 +983,7 @@ static inline void rq_clock_skip_update(struct rq *rq)
 }
 
 /*
- * See rt task throttoling, which is the only time a skip
+ * See rt task throttling, which is the only time a skip
  * request is cancelled.
  */
 static inline void rq_clock_cancel_skipupdate(struct rq *rq)