diff options
Diffstat (limited to 'kernel/sched/rt.c')
 kernel/sched/rt.c | 13 +++++++++++++
 1 file changed, 13 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 573e1ca01102..944cb68420e9 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -788,6 +788,19 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 	const struct cpumask *span;
 
 	span = sched_rt_period_mask();
+#ifdef CONFIG_RT_GROUP_SCHED
+	/*
+	 * FIXME: isolated CPUs should really leave the root task group,
+	 * whether they are isolcpus or were isolated via cpusets, lest
+	 * the timer run on a CPU which does not service all runqueues,
+	 * potentially leaving other CPUs indefinitely throttled. If
+	 * isolation is really required, the user will turn the throttle
+	 * off to kill the perturbations it causes anyway. Meanwhile,
+	 * this maintains functionality for boot and/or troubleshooting.
+	 */
+	if (rt_b == &root_task_group.rt_bandwidth)
+		span = cpu_online_mask;
+#endif
 	for_each_cpu(i, span) {
 		int enqueue = 0;
 		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);