Diffstat (limited to 'kernel/sched/rt.c'):
 kernel/sched/rt.c | 14 ++++++++++++++
 1 file changed, 14 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 573e1ca01102..e0b7ba9c040f 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -691,6 +691,7 @@ balanced:
 	 * runtime - in which case borrowing doesn't make sense.
 	 */
 	rt_rq->rt_runtime = RUNTIME_INF;
+	rt_rq->rt_throttled = 0;
 	raw_spin_unlock(&rt_rq->rt_runtime_lock);
 	raw_spin_unlock(&rt_b->rt_runtime_lock);
 }
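
[Annotation, not part of the patch] The hunk above lands in __disable_runtime(): once runtime borrowing is torn down, the runqueue's budget is set to RUNTIME_INF, and the added line clears any stale throttle flag. A minimal userspace sketch of the failure mode, assuming a toy model of the state involved (all toy_* names are hypothetical stand-ins, not kernel APIs):

#include <stdio.h>
#include <stdbool.h>

#define TOY_RUNTIME_INF (~0ULL)	/* stand-in for the kernel's RUNTIME_INF */

/* Toy per-runqueue RT bandwidth state (hypothetical, not struct rt_rq). */
struct toy_rt_rq {
	unsigned long long rt_runtime;	/* budget per period */
	bool rt_throttled;		/* budget exhausted this period */
};

/* Models the patched hunk: disable bandwidth control for a runqueue.
 * Without 'with_fix', a previously throttled queue keeps reporting
 * itself throttled even though its budget is now unlimited. */
static void toy_disable_runtime(struct toy_rt_rq *rt_rq, bool with_fix)
{
	rt_rq->rt_runtime = TOY_RUNTIME_INF;
	if (with_fix)
		rt_rq->rt_throttled = false;	/* the added line */
}

int main(void)
{
	struct toy_rt_rq rq = { .rt_runtime = 950000, .rt_throttled = true };

	toy_disable_runtime(&rq, false);
	printf("without fix: throttled=%d despite infinite budget\n",
	       rq.rt_throttled);

	rq.rt_throttled = true;
	toy_disable_runtime(&rq, true);
	printf("with fix:    throttled=%d\n", rq.rt_throttled);
	return 0;
}

Without the cleared flag, the runqueue reports itself throttled even though its budget is unlimited, which is exactly the stuck state the next hunk also guards against.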
@@ -788,6 +789,19 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 	const struct cpumask *span;
 
 	span = sched_rt_period_mask();
+#ifdef CONFIG_RT_GROUP_SCHED
+	/*
+	 * FIXME: isolated CPUs should really leave the root task group,
+	 * whether they are isolcpus or were isolated via cpusets, lest
+	 * the timer run on a CPU which does not service all runqueues,
+	 * potentially leaving other CPUs indefinitely throttled. If
+	 * isolation is really required, the user will turn the throttle
+	 * off to kill the perturbations it causes anyway. Meanwhile,
+	 * this maintains functionality for boot and/or troubleshooting.
+	 */
+	if (rt_b == &root_task_group.rt_bandwidth)
+		span = cpu_online_mask;
+#endif
 	for_each_cpu(i, span) {
 		int enqueue = 0;
 		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
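
[Annotation, not part of the patch] The second hunk widens the mask the period timer walks for the root task group: if the timer fires on a CPU whose span excludes isolated CPUs, their runqueues are never replenished and stay throttled indefinitely. A toy userspace model of that replenish loop, with hypothetical names standing in for do_sched_rt_period_timer() and the cpumask machinery:

#include <stdio.h>
#include <stdbool.h>

#define TOY_NR_CPUS 4

/* Per-CPU throttle state; CPU 3 plays the isolated CPU. */
static bool throttled[TOY_NR_CPUS] = { true, true, true, true };

/* Toy stand-in for do_sched_rt_period_timer(): every CPU covered by
 * 'span' gets its runtime replenished and is unthrottled; CPUs outside
 * the span are never serviced. */
static void toy_period_timer(const bool span[TOY_NR_CPUS])
{
	for (int i = 0; i < TOY_NR_CPUS; i++)
		if (span[i])
			throttled[i] = false;
}

int main(void)
{
	/* Narrow span: the timer's CPU does not cover the isolated CPU. */
	const bool narrow_span[TOY_NR_CPUS] = { true, true, true, false };
	/* The patch's choice for the root group: cpu_online_mask. */
	const bool online_mask[TOY_NR_CPUS] = { true, true, true, true };

	toy_period_timer(narrow_span);
	printf("narrow span: cpu3 throttled=%d (stuck indefinitely)\n",
	       throttled[3]);

	toy_period_timer(online_mask);
	printf("online mask: cpu3 throttled=%d\n", throttled[3]);
	return 0;
}

Substituting cpu_online_mask for the narrower span, as the patch does for root_task_group, guarantees every online runqueue is eventually serviced, at the cost of the perturbation the FIXME comment acknowledges on isolated CPUs.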