author		Peter Zijlstra <peterz@infradead.org>	2011-10-18 16:03:48 -0400
committer	Ingo Molnar <mingo@elte.hu>	2012-03-01 04:28:01 -0500
commit		42c62a589f1ccbf38a02cb732231f9c2fccc5ab0 (patch)
tree		80ba018cd3b581668f9c8dc75d52f9c2a9ee9f54 /kernel/sched/rt.c
parent		7e4d960993331e92567f0180e45322a93e6780ba (diff)
sched/rt: Keep period timer ticking when rt throttling is active
When a runqueue is throttled we cannot disable the period timer
because that timer is the only way to undo the throttling.

We got stale throttling entries when a rq was throttled and then the
global sysctl was disabled, which stopped the timer.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
[ Added changelog ]
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/n/tip-nuj34q52p6ro7szapuz84i0v@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched/rt.c')
-rw-r--r--	kernel/sched/rt.c | 13 ++++++++-----
1 file changed, 8 insertions(+), 5 deletions(-)
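The failure mode the changelog describes can be modelled outside the
kernel. The sketch below is a purely illustrative toy (all names are
invented, not kernel code): the period timer is the only thing that
clears a runqueue's throttled state, so a handler that bails out early
while something is still throttled leaves that state stale forever,
which is exactly what the patch below prevents by checking `throttled`
before taking the early return.

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the bug -- invented names, not kernel code. */
struct toy_rq {
	bool throttled;	/* only the period timer ever clears this */
};

static bool bandwidth_enabled = true;	/* models the global RT bandwidth sysctl */

/* Old behaviour: bail out before visiting any runqueue. */
static int period_timer_old(struct toy_rq *rq)
{
	if (!bandwidth_enabled)
		return 1;	/* timer stops; rq->throttled is never cleared */
	rq->throttled = false;	/* refresh runtime, undo throttling */
	return 1;
}

/* Patched behaviour: unthrottle first, stop only when nothing was throttled. */
static int period_timer_new(struct toy_rq *rq)
{
	bool was_throttled = rq->throttled;

	rq->throttled = false;	/* the per-cpu loop always runs now */
	if (!was_throttled && !bandwidth_enabled)
		return 1;	/* safe to stop: no stale state left behind */
	return 0;	/* keep the period timer ticking */
}

int main(void)
{
	struct toy_rq rq = { .throttled = true };

	bandwidth_enabled = false;	/* admin disables RT bandwidth */
	period_timer_old(&rq);
	printf("old: still throttled? %d\n", rq.throttled);	/* 1 -> stale */

	rq.throttled = true;
	period_timer_new(&rq);
	printf("new: still throttled? %d\n", rq.throttled);	/* 0 -> cleared */
	return 0;
}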
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index f70206c2c802..6d1eb0be1870 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -778,12 +778,9 @@ static inline int balance_runtime(struct rt_rq *rt_rq)
 
 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 {
-	int i, idle = 1;
+	int i, idle = 1, throttled = 0;
 	const struct cpumask *span;
 
-	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
-		return 1;
-
 	span = sched_rt_period_mask();
 	for_each_cpu(i, span) {
 		int enqueue = 0;
@@ -818,12 +815,17 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 			if (!rt_rq_throttled(rt_rq))
 				enqueue = 1;
 		}
+		if (rt_rq->rt_throttled)
+			throttled = 1;
 
 		if (enqueue)
 			sched_rt_rq_enqueue(rt_rq);
 		raw_spin_unlock(&rq->lock);
 	}
 
+	if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
+		return 1;
+
 	return idle;
 }
 
@@ -884,7 +886,8 @@ static void update_curr_rt(struct rq *rq)
 	if (unlikely((s64)delta_exec < 0))
 		delta_exec = 0;
 
-	schedstat_set(curr->se.statistics.exec_max, max(curr->se.statistics.exec_max, delta_exec));
+	schedstat_set(curr->se.statistics.exec_max,
+		      max(curr->se.statistics.exec_max, delta_exec));
 
 	curr->se.sum_exec_runtime += delta_exec;
 	account_group_exec_runtime(curr, delta_exec);
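For context on why returning 1 stops the timer: the return value of
do_sched_rt_period_timer() feeds the hrtimer callback, which translates
it into HRTIMER_NORESTART. Below is a condensed sketch of that caller,
paraphrased from memory of kernels of this era; the exact loop details
may differ from the real source.

/* Paraphrased sketch of the caller in kernel/sched/core.c (not verbatim). */
static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
	struct rt_bandwidth *rt_b =
		container_of(timer, struct rt_bandwidth, rt_period_timer);
	int idle = 0;

	for (;;) {
		/* Advance the timer by whole periods until it is caught up. */
		int overrun = hrtimer_forward_now(timer, rt_b->rt_period);

		if (!overrun)
			break;

		idle = do_sched_rt_period_timer(rt_b, overrun);
	}

	/*
	 * idle == 1 kills the period timer -- with this patch that can
	 * only happen once no rt_rq is throttled any more.
	 */
	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}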