Diffstat (limited to 'kernel/sched/rt.c')
 kernel/sched/rt.c | 20 ++++++++++++++++++--
 1 file changed, 18 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 6d1eb0be1870..7f7e7cdcb472 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -857,8 +857,24 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
 		return 0;
 
 	if (rt_rq->rt_time > runtime) {
-		rt_rq->rt_throttled = 1;
-		printk_once(KERN_WARNING "sched: RT throttling activated\n");
+		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
+
+		/*
+		 * Don't actually throttle groups that have no runtime assigned
+		 * but accrue some time due to boosting.
+		 */
+		if (likely(rt_b->rt_runtime)) {
+			rt_rq->rt_throttled = 1;
+			printk_once(KERN_WARNING "sched: RT throttling activated\n");
+		} else {
+			/*
+			 * In case we did anyway, make it go away,
+			 * replenishment is a joke, since it will replenish us
+			 * with exactly 0 ns.
+			 */
+			rt_rq->rt_time = 0;
+		}
+
 		if (rt_rq_throttled(rt_rq)) {
 			sched_rt_rq_dequeue(rt_rq);
 			return 1;
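For reference, a minimal userspace sketch of the decision this hunk implements (an illustrative model, not kernel code; struct model_rt_rq and runtime_exceeded() are made-up names, and the rt_rq_throttled()/rt_nr_boosted interaction is omitted): a group whose rt_runtime is zero but which accrued rt_time through boosting is not throttled, and its accrued time is dropped, since a replenish of 0 ns could never unthrottle it.

	#include <stdbool.h>
	#include <stdio.h>

	/* Illustrative stand-in for struct rt_rq; field names mirror the kernel's. */
	struct model_rt_rq {
		unsigned long long rt_time;    /* time consumed this period (ns) */
		unsigned long long rt_runtime; /* budget assigned to the group (ns) */
		int rt_throttled;
	};

	/* Simplified model of the patched check in sched_rt_runtime_exceeded(). */
	static bool runtime_exceeded(struct model_rt_rq *rq)
	{
		if (rq->rt_time > rq->rt_runtime) {
			if (rq->rt_runtime) {
				/* Real budget overrun: throttle the group. */
				rq->rt_throttled = 1;
				return true;
			}
			/*
			 * No runtime assigned; the time came from boosting.
			 * Drop it instead of throttling.
			 */
			rq->rt_time = 0;
		}
		return false;
	}

	int main(void)
	{
		struct model_rt_rq boosted = { .rt_time = 500, .rt_runtime = 0 };
		struct model_rt_rq greedy  = { .rt_time = 500, .rt_runtime = 100 };

		printf("boosted throttled: %d\n", runtime_exceeded(&boosted)); /* 0 */
		printf("greedy  throttled: %d\n", runtime_exceeded(&greedy));  /* 1 */
		return 0;
	}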