author    Linus Torvalds <torvalds@linux-foundation.org>    2018-07-30 15:13:56 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2018-07-30 15:13:56 -0400
commit    ae3e10aba57c284818fd493b18732ce8a4632e1e (patch)
tree      a9a7b4ee6eda59f7da79f1fe882f81618b5e6300
parent    0634922a78f08df22037ec4ddee717f92d892a68 (diff)
parent    f3d133ee0a17d5694c6f21873eec9863e11fa423 (diff)
Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Ingo Molnar:
 "Misc fixes:

   - a deadline scheduler related bug fix which triggered a kernel
     warning

   - an RT_RUNTIME_SHARE fix

   - a stop_machine preemption fix

   - a potential NULL dereference fix in sched_domain_debug_one()"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/rt: Restore rt_runtime after disabling RT_RUNTIME_SHARE
  sched/deadline: Update rq_clock of later_rq when pushing a task
  stop_machine: Disable preemption after queueing stopper threads
  sched/topology: Check variable group before dereferencing it
-rw-r--r--   kernel/sched/deadline.c |  8
-rw-r--r--   kernel/sched/rt.c       |  2
-rw-r--r--   kernel/sched/topology.c |  2
-rw-r--r--   kernel/stop_machine.c   | 10
4 files changed, 19 insertions, 3 deletions
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 10c7b51c0d1f..b5fbdde6afa9 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -2090,8 +2090,14 @@ retry:
 	sub_rq_bw(&next_task->dl, &rq->dl);
 	set_task_cpu(next_task, later_rq->cpu);
 	add_rq_bw(&next_task->dl, &later_rq->dl);
+
+	/*
+	 * Update the later_rq clock here, because the clock is used
+	 * by the cpufreq_update_util() inside __add_running_bw().
+	 */
+	update_rq_clock(later_rq);
 	add_running_bw(&next_task->dl, &later_rq->dl);
-	activate_task(later_rq, next_task, 0);
+	activate_task(later_rq, next_task, ENQUEUE_NOCLOCK);
 	ret = 1;
 
 	resched_curr(later_rq);
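
The hunk above updates later_rq's clock by hand before the first consumer needs it and then passes ENQUEUE_NOCLOCK so activate_task() does not update it a second time. Below is a minimal user-space sketch of that pattern, not kernel code; every toy_* name and the flag value are hypothetical stand-ins for the rq clock machinery.

#define _POSIX_C_SOURCE 199309L
#include <stdio.h>
#include <time.h>

#define TOY_ENQUEUE_NOCLOCK 0x01     /* hypothetical mirror of ENQUEUE_NOCLOCK */

struct toy_rq {
	long long clock_ns;          /* stands in for rq->clock */
};

static void toy_update_rq_clock(struct toy_rq *rq)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	rq->clock_ns = (long long)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* Consumer that needs a fresh clock, as cpufreq_update_util() does via __add_running_bw(). */
static void toy_add_running_bw(struct toy_rq *rq)
{
	printf("bandwidth accounted at clock %lld ns\n", rq->clock_ns);
}

static void toy_activate_task(struct toy_rq *rq, int flags)
{
	/* Only refresh the clock if the caller did not already do so. */
	if (!(flags & TOY_ENQUEUE_NOCLOCK))
		toy_update_rq_clock(rq);
	printf("task activated at clock %lld ns\n", rq->clock_ns);
}

int main(void)
{
	struct toy_rq later_rq = { 0 };

	toy_update_rq_clock(&later_rq);                    /* the explicit update added by the fix */
	toy_add_running_bw(&later_rq);                     /* now sees an up-to-date clock */
	toy_activate_task(&later_rq, TOY_ENQUEUE_NOCLOCK); /* skips a second, redundant update */
	return 0;
}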
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 572567078b60..eaaec8364f96 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -836,6 +836,8 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 	 * can be time-consuming. Try to avoid it when possible.
 	 */
 	raw_spin_lock(&rt_rq->rt_runtime_lock);
+	if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF)
+		rt_rq->rt_runtime = rt_b->rt_runtime;
 	skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
 	raw_spin_unlock(&rt_rq->rt_runtime_lock);
 	if (skip)
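
The rt.c hunk restores rt_rq->rt_runtime to the configured bandwidth on every period tick once RT_RUNTIME_SHARE is off, so runtime borrowed from sibling runqueues while sharing was enabled does not stick around. A small user-space sketch of that refill logic follows, with hypothetical toy_* names standing in for the kernel structures.

#include <stdbool.h>
#include <stdio.h>

#define TOY_RUNTIME_INF (-1LL)       /* hypothetical mirror of RUNTIME_INF */

struct toy_rt_bandwidth {
	long long rt_runtime;        /* configured per-period budget */
};

struct toy_rt_rq {
	long long rt_runtime;        /* per-runqueue budget, may have been borrowed up */
};

static bool toy_rt_runtime_share;    /* stands in for sched_feat(RT_RUNTIME_SHARE) */

static void toy_period_timer(struct toy_rt_bandwidth *rt_b, struct toy_rt_rq *rt_rq)
{
	/* Mirrors the added hunk: with sharing off, snap back to the configured budget. */
	if (!toy_rt_runtime_share && rt_rq->rt_runtime != TOY_RUNTIME_INF)
		rt_rq->rt_runtime = rt_b->rt_runtime;
}

int main(void)
{
	struct toy_rt_bandwidth rt_b = { .rt_runtime = 950000000LL };  /* 950 ms default */
	struct toy_rt_rq rt_rq = { .rt_runtime = 1000000000LL };       /* borrowed up to 1 s */

	toy_rt_runtime_share = false;    /* feature was just disabled */
	toy_period_timer(&rt_b, &rt_rq);
	printf("rt_runtime restored to %lld ns\n", rt_rq.rt_runtime);
	return 0;
}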
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 05a831427bc7..56a0fed30c0a 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -47,7 +47,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 	if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
 		printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu);
 	}
-	if (!cpumask_test_cpu(cpu, sched_group_span(group))) {
+	if (group && !cpumask_test_cpu(cpu, sched_group_span(group))) {
 		printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu);
 	}
 
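
The topology.c hunk only adds a NULL check on group before sched_group_span() dereferences it. A trivial user-space sketch of the same guard, with hypothetical toy_* names:

#include <stdio.h>

struct toy_group {
	unsigned long span;          /* bitmask of CPUs in the group */
};

struct toy_domain {
	struct toy_group *groups;    /* may be NULL mid-build or mid-teardown */
	unsigned long span;          /* bitmask of CPUs in the domain */
};

static void toy_domain_debug_one(const struct toy_domain *sd, int cpu)
{
	if (!(sd->span & (1UL << cpu)))
		fprintf(stderr, "ERROR: domain->span does not contain CPU%d\n", cpu);

	/* The added guard: only dereference groups when it actually exists. */
	if (sd->groups && !(sd->groups->span & (1UL << cpu)))
		fprintf(stderr, "ERROR: domain->groups does not contain CPU%d\n", cpu);
}

int main(void)
{
	struct toy_domain sd = { .groups = NULL, .span = 1UL << 0 };

	toy_domain_debug_one(&sd, 0);    /* no crash even though groups is NULL */
	return 0;
}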
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 1ff523dae6e2..e190d1ef3a23 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -260,6 +260,15 @@ retry:
 	err = 0;
 	__cpu_stop_queue_work(stopper1, work1, &wakeq);
 	__cpu_stop_queue_work(stopper2, work2, &wakeq);
+	/*
+	 * The waking up of stopper threads has to happen
+	 * in the same scheduling context as the queueing.
+	 * Otherwise, there is a possibility of one of the
+	 * above stoppers being woken up by another CPU,
+	 * and preempting us. This will cause us to not
+	 * wake up the other stopper forever.
+	 */
+	preempt_disable();
 unlock:
 	raw_spin_unlock(&stopper2->lock);
 	raw_spin_unlock_irq(&stopper1->lock);
@@ -271,7 +280,6 @@ unlock:
 	}
 
 	if (!err) {
-		preempt_disable();
 		wake_up_q(&wakeq);
 		preempt_enable();
 	}
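
The stop_machine.c change moves preempt_disable() so it already covers the point where the queued works become visible (before the stopper locks are dropped) and stays held across wake_up_q(). The sketch below is a user-space structural model of that ordering only, not of real preemption; toy_preempt_disable()/toy_preempt_enable() are just counters and the mutexes stand in for stopper1->lock and stopper2->lock.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t stopper1_lock = PTHREAD_MUTEX_INITIALIZER;  /* models stopper1->lock */
static pthread_mutex_t stopper2_lock = PTHREAD_MUTEX_INITIALIZER;  /* models stopper2->lock */
static int toy_preempt_count;        /* stand-in for the real preempt count */

static void toy_preempt_disable(void) { toy_preempt_count++; }
static void toy_preempt_enable(void)  { toy_preempt_count--; }

static void toy_queue_work(const char *which)
{
	printf("queued work for %s (preempt_count=%d)\n", which, toy_preempt_count);
}

static void toy_wake_up_q(void)
{
	printf("waking queued stoppers (preempt_count=%d)\n", toy_preempt_count);
}

int main(void)
{
	pthread_mutex_lock(&stopper1_lock);
	pthread_mutex_lock(&stopper2_lock);

	toy_queue_work("stopper1");
	toy_queue_work("stopper2");

	/*
	 * New ordering from the fix: preemption is disabled before the locks
	 * are dropped, so a stopper woken early by another CPU cannot preempt
	 * this context between the unlock and the wake-up and leave its
	 * sibling queued but never woken.
	 */
	toy_preempt_disable();

	pthread_mutex_unlock(&stopper2_lock);
	pthread_mutex_unlock(&stopper1_lock);

	toy_wake_up_q();
	toy_preempt_enable();
	return 0;
}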