author	Linus Torvalds <torvalds@linux-foundation.org>	2016-05-10 14:32:01 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-05-10 14:32:01 -0400
commit	ac2440654df6ac7314e2f8819fe05e7c863a2392 (patch)
tree	be0cc8040cc19c67a2dd035aacedb54ab4dd41e6
parent	2d0bd9534c8ddaebee64e1b4b7d621915f65e994 (diff)
parent	13b5ab02ae118fc8dfdc2b8597688ec4a11d5b53 (diff)
Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Ingo Molnar:
 "A UP kernel cpufreq fix and a rt/dl scheduler corner case fix"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/rt, sched/dl: Don't push if task's scheduling class was changed
  sched/fair: Fix !CONFIG_SMP kernel cpufreq governor breakage
-rw-r--r--	kernel/sched/deadline.c	1
-rw-r--r--	kernel/sched/fair.c	9
-rw-r--r--	kernel/sched/rt.c	1
3 files changed, 10 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index affd97ec9f65..686ec8adf952 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1394,6 +1394,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
 			     !cpumask_test_cpu(later_rq->cpu,
 					       &task->cpus_allowed) ||
 			     task_running(rq, task) ||
+			     !dl_task(task) ||
 			     !task_on_rq_queued(task))) {
 			double_unlock_balance(rq, later_rq);
 			later_rq = NULL;
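Note on the hunk above: find_lock_later_rq() takes the destination runqueue's lock via double_lock_balance(), which can transiently drop this rq's lock. In that unlocked window another CPU may move the task out of the deadline class (for instance via sched_setscheduler()), so every property established before the lock was dropped must be rechecked after it is retaken; the patch adds !dl_task(task) to that recheck. The following is a minimal userspace sketch of the drop-and-recheck pattern, a hypothetical pthread analogy rather than kernel code; task_class, is_dl_task() and try_push_task() are invented names for illustration only.

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER;
	static int task_class = 1;	/* 1 == "deadline", 0 == "fair"; protected by rq_lock */

	static bool is_dl_task(void)
	{
		return task_class == 1;
	}

	/*
	 * Mirrors the shape of find_lock_later_rq(): the predicate was true
	 * under the lock, the lock is then dropped to acquire a second lock,
	 * so the predicate must be rechecked once the lock is held again.
	 */
	static void try_push_task(void)
	{
		pthread_mutex_lock(&rq_lock);
		bool push = is_dl_task();	/* true while locked ...        */
		pthread_mutex_unlock(&rq_lock);	/* ... but the lock drops here  */

		/*
		 * Window: another thread may change task_class right now, just
		 * as sched_setscheduler() can change a task's scheduling class
		 * while the rq lock is not held.
		 */

		pthread_mutex_lock(&rq_lock);
		if (push && !is_dl_task()) {
			/* The recheck the patch adds: class changed, bail out. */
			printf("class changed while unlocked, not pushing\n");
		} else if (push) {
			printf("pushing task\n");
		}
		pthread_mutex_unlock(&rq_lock);
	}

	int main(void)
	{
		try_push_task();
		return 0;
	}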
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0fe30e66aff1..40748dc8ea3e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3030,7 +3030,14 @@ static int idle_balance(struct rq *this_rq);
 
 #else /* CONFIG_SMP */
 
-static inline void update_load_avg(struct sched_entity *se, int update_tg) {}
+static inline void update_load_avg(struct sched_entity *se, int not_used)
+{
+	struct cfs_rq *cfs_rq = cfs_rq_of(se);
+	struct rq *rq = rq_of(cfs_rq);
+
+	cpufreq_trigger_update(rq_clock(rq));
+}
+
 static inline void
 enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
 static inline void
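Note on the hunk above: on !CONFIG_SMP builds update_load_avg() was an empty stub, so the scheduler-driven cpufreq hook it carries on SMP was never invoked from the fair class and the cpufreq governor received no updates on UP kernels; the fix gives the stub a body that still kicks cpufreq_trigger_update() with the current rq clock. Below is a hypothetical standalone illustration of the general hazard, not kernel code: when a config-dependent stub elides a whole bookkeeping function, any side effect buried inside it silently disappears in that configuration. All names mirror the patch but the program itself is invented.

	#include <stdio.h>

	static void cpufreq_trigger_update(unsigned long now)
	{
		printf("cpufreq governor update at clock %lu\n", now);
	}

	#ifdef CONFIG_SMP
	static void update_load_avg(unsigned long now)
	{
		/* ... full per-entity load tracking elided ... */
		cpufreq_trigger_update(now);
	}
	#else
	/* Before the fix this stub was empty, so the governor was never
	 * notified on UP; the fix keeps the side effect in the stub. */
	static void update_load_avg(unsigned long now)
	{
		cpufreq_trigger_update(now);
	}
	#endif

	int main(void)
	{
		update_load_avg(42);
		return 0;
	}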
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index c41ea7ac1764..ec4f538d4396 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1729,6 +1729,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 			     !cpumask_test_cpu(lowest_rq->cpu,
 					       tsk_cpus_allowed(task)) ||
 			     task_running(rq, task) ||
+			     !rt_task(task) ||
 			     !task_on_rq_queued(task))) {
 
 			double_unlock_balance(rq, lowest_rq);
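The rt.c change is the exact counterpart of the deadline.c one: find_lock_lowest_rq() also drops the rq lock inside double_lock_balance(), so after retaking it the task must be rechecked with rt_task() in case it was moved out of the RT class in the unlocked window. The drop-and-recheck sketch after the deadline.c hunk applies here unchanged, with rt_task() standing in for dl_task().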