Diffstat (limited to 'kernel/sched/rt.c')
-rw-r--r--  kernel/sched/rt.c | 43 ++++++++++++++++++++++++++++++++++---------
1 file changed, 34 insertions(+), 9 deletions(-)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index f42ae7fb5ec5..b60dad720173 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -778,12 +778,9 @@ static inline int balance_runtime(struct rt_rq *rt_rq)
 
 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 {
-	int i, idle = 1;
+	int i, idle = 1, throttled = 0;
 	const struct cpumask *span;
 
-	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
-		return 1;
-
 	span = sched_rt_period_mask();
 	for_each_cpu(i, span) {
 		int enqueue = 0;
@@ -818,12 +815,17 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 			if (!rt_rq_throttled(rt_rq))
 				enqueue = 1;
 		}
+		if (rt_rq->rt_throttled)
+			throttled = 1;
 
 		if (enqueue)
 			sched_rt_rq_enqueue(rt_rq);
 		raw_spin_unlock(&rq->lock);
 	}
 
+	if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
+		return 1;
+
 	return idle;
 }
 
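The two hunks above reorder do_sched_rt_period_timer(): the early return for disabled bandwidth now happens only after the per-CPU replenish/unthrottle loop, and only when nothing was found throttled. Otherwise, disabling RT throttling at runtime (e.g. setting the runtime to RUNTIME_INF) while some rt_rq is still throttled would stop the period timer before that rt_rq was ever unthrottled, leaving its tasks dequeued forever. A minimal userspace model of just that control flow, with invented names (toy_rq, period_timer_old/new); this is a sketch, not kernel code:

#include <stdbool.h>
#include <stdio.h>

/* Toy model of one throttled runqueue; not the kernel's types. */
struct toy_rq { bool throttled; };

static bool bandwidth_enabled;	/* models rt_bandwidth_enabled() */

/* Old flow: bail out before the unthrottle pass. */
static int period_timer_old(struct toy_rq *rq)
{
	if (!bandwidth_enabled)
		return 1;		/* timer stops; rq->throttled stays set */
	rq->throttled = false;		/* replenish + unthrottle */
	return 0;
}

/* New flow: unthrottle first, stop the timer only when nothing was throttled. */
static int period_timer_new(struct toy_rq *rq)
{
	bool was_throttled = rq->throttled;

	rq->throttled = false;		/* replenish + unthrottle */
	if (!was_throttled && !bandwidth_enabled)
		return 1;		/* safe to stop the timer now */
	return 0;
}

int main(void)
{
	struct toy_rq a = { .throttled = true }, b = { .throttled = true };

	bandwidth_enabled = false;	/* throttling disabled while throttled */
	period_timer_old(&a);
	period_timer_new(&b);
	printf("old: still throttled=%d, new: still throttled=%d\n",
	       a.throttled, b.throttled);	/* prints: old 1, new 0 */
	return 0;
}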
@@ -855,8 +857,30 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
 		return 0;
 
 	if (rt_rq->rt_time > runtime) {
-		rt_rq->rt_throttled = 1;
-		printk_once(KERN_WARNING "sched: RT throttling activated\n");
+		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
+
+		/*
+		 * Don't actually throttle groups that have no runtime assigned
+		 * but accrue some time due to boosting.
+		 */
+		if (likely(rt_b->rt_runtime)) {
+			static bool once = false;
+
+			rt_rq->rt_throttled = 1;
+
+			if (!once) {
+				once = true;
+				printk_sched("sched: RT throttling activated\n");
+			}
+		} else {
+			/*
+			 * In case we did anyway, make it go away,
+			 * replenishment is a joke, since it will replenish us
+			 * with exactly 0 ns.
+			 */
+			rt_rq->rt_time = 0;
+		}
+
 		if (rt_rq_throttled(rt_rq)) {
 			sched_rt_rq_dequeue(rt_rq);
 			return 1;
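This hunk handles priority-inheritance boosting: a group with rt_runtime == 0 can still accrue rt_time while one of its tasks runs boosted, and throttling such a group would never be undone, since each period replenishes it with exactly 0 ns. The new code therefore throttles only groups with real runtime and discards the accrued time otherwise. It also swaps printk_once() for a hand-rolled once flag around printk_sched(), a deferred printk variant meant to be usable from scheduler paths like this one, which runs under rq->lock where a synchronous printk can deadlock. A standalone sketch of just the throttle decision, with invented names and the earlier checks omitted:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative model only; field names mirror the patch, types do not. */
struct toy_rt_rq {
	uint64_t rt_time;	/* time accrued this period */
	uint64_t rt_runtime;	/* budget per period; 0 means "ran boosted only" */
	bool	 rt_throttled;
};

static bool runtime_exceeded(struct toy_rt_rq *rt_rq)
{
	if (rt_rq->rt_time <= rt_rq->rt_runtime)
		return false;

	if (rt_rq->rt_runtime) {
		rt_rq->rt_throttled = true;	/* normal over-budget case */
	} else {
		/*
		 * Zero-budget group that only ran because of PI boosting:
		 * throttling it would stick forever (replenish adds 0 ns),
		 * so drop the accrued time instead.
		 */
		rt_rq->rt_time = 0;
	}
	return rt_rq->rt_throttled;
}

int main(void)
{
	struct toy_rt_rq normal  = { .rt_time = 950, .rt_runtime = 900 };
	struct toy_rt_rq boosted = { .rt_time = 50,  .rt_runtime = 0 };

	printf("normal throttled:  %d\n", runtime_exceeded(&normal));	/* 1 */
	printf("boosted throttled: %d\n", runtime_exceeded(&boosted));	/* 0 */
	return 0;
}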
@@ -884,7 +908,8 @@ static void update_curr_rt(struct rq *rq)
 	if (unlikely((s64)delta_exec < 0))
 		delta_exec = 0;
 
-	schedstat_set(curr->se.statistics.exec_max, max(curr->se.statistics.exec_max, delta_exec));
+	schedstat_set(curr->se.statistics.exec_max,
+		      max(curr->se.statistics.exec_max, delta_exec));
 
 	curr->se.sum_exec_runtime += delta_exec;
 	account_group_exec_runtime(curr, delta_exec);
@@ -1972,7 +1997,7 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
 	if (--p->rt.time_slice)
 		return;
 
-	p->rt.time_slice = DEF_TIMESLICE;
+	p->rt.time_slice = RR_TIMESLICE;
 
 	/*
 	 * Requeue to the end of queue if we are not the only element
@@ -2000,7 +2025,7 @@ static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
 	 * Time slice is 0 for SCHED_FIFO tasks
 	 */
 	if (task->policy == SCHED_RR)
-		return DEF_TIMESLICE;
+		return RR_TIMESLICE;
 	else
 		return 0;
 }
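The last two hunks rename the SCHED_RR quantum from the scheduler-private DEF_TIMESLICE to RR_TIMESLICE. As far as this patch goes the value is unchanged: both macros expand to (100 * HZ / 1000), i.e. roughly 100 ms expressed in scheduler ticks, so the actual tick count depends on the kernel's HZ. A quick userspace illustration of that arithmetic (HZ values hard-coded for the example):

#include <stdio.h>

/* Models the RR_TIMESLICE definition, (100 * HZ / 1000): ~100 ms in ticks. */
static int rr_timeslice(int hz)
{
	return 100 * hz / 1000;
}

int main(void)
{
	const int hz_values[] = { 100, 250, 300, 1000 };
	unsigned int i;

	for (i = 0; i < sizeof(hz_values) / sizeof(hz_values[0]); i++)
		printf("HZ=%4d -> RR_TIMESLICE = %3d ticks\n",
		       hz_values[i], rr_timeslice(hz_values[i]));
	return 0;
}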